/* For ELF64 the definitions are the same. */ #ifndef ELF64_ST_VISIBILITY #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) #endif
/* How to extract information held in the st_other field. */ #ifndef GELF_ST_VISIBILITY #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val) #endif
ri->sorted = calloc(ri->nr_entries, sz); if (!ri->sorted) return -1; for (i = 0; i < ri->nr_entries; i++)
ri->sorted[i] = i;
qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri); return 0;
}
/*
 * For x86_64, the GNU linker is putting IFUNC information in the relocation
 * addend, flagged by an R_X86_64_IRELATIVE relocation on a RELA entry.
 * Returns true when @ri's current relocation may carry such an addend.
 */
static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
{
	return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
	       GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
}
if (elf_read_program_header(elf, addr, &phdr)) returnfalse;
addr -= phdr.p_vaddr - phdr.p_offset;
sym = dso__find_symbol_nocache(dso, addr);
/* Expecting the address to be an IFUNC or IFUNC alias */ if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias)) returnfalse;
if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data) return;
/* Sort into offset order */
sort_rela_dyn(di);
}
/*
 * Get instruction displacement from a plt entry for x86_64.
 *
 * @p: pointer to the start of the PLT entry bytes
 *
 * Skips an optional endbr64 and an optional bnd (0xf2) prefix, then, for an
 * indirect "jmp *disp(%rip)" (ff 25), returns the displacement plus the
 * offset from the start of the entry to the end of the instruction (i.e. the
 * RIP-relative base). Returns 0 if the entry is not in the expected form.
 */
static u32 get_x86_64_plt_disp(const u8 *p)
{
	/* static const: the opcode pattern never changes, no per-call copy */
	static const u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
	int n = 0;

	/* Skip endbr64 */
	if (!memcmp(p, endbr64, sizeof(endbr64)))
		n += sizeof(endbr64);
	/* Skip bnd prefix */
	if (p[n] == 0xf2)
		n += 1;
	/* jmp with 4-byte displacement */
	if (p[n] == 0xff && p[n + 1] == 0x25) {
		u32 disp;

		n += 2;
		/* Also add offset from start of entry to end of instruction */
		memcpy(&disp, p + n, sizeof(disp));
		return n + 4 + le32toh(disp);
	}
	return 0;
}
disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i); if (!disp) returnfalse;
/* Compute target offset of the .plt.got entry */
vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp;
/* Find that offset in .rela.dyn (sorted by offset) */
vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset); if (!vr) returnfalse;
/* Get the associated symbol */
gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
sym_name = elf_sym__name(&sym, di->dynstr_data);
demangled = dso__demangle_sym(di->dso, /*kmodule=*/0, sym_name); if (demangled != NULL)
sym_name = demangled;
if (ehdr->e_machine == EM_X86_64)
get_rela_dyn_info(elf, ehdr, &di, scn);
for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) { if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz))
snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i);
sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf); if (!sym) goto out;
symbols__insert(dso__symbols(dso), sym);
}
err = 0;
out:
exit_rela_dyn(&di); return err;
}
/* * We need to check if we have a .dynsym, so that we can handle the * .plt, synthesizing its symbols, that aren't on the symtabs (be it * .dynsym or .symtab). * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
*/ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
{
uint32_t idx;
GElf_Sym sym;
u64 plt_offset, plt_header_size, plt_entry_size;
GElf_Shdr shdr_plt, plt_sec_shdr; struct symbol *f, *plt_sym;
GElf_Shdr shdr_rel_plt, shdr_dynsym;
Elf_Data *syms, *symstrs;
Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
GElf_Ehdr ehdr; char sympltname[1024];
Elf *elf; int nr = 0, err = -1; struct rel_info ri = { .is_rela = false }; bool lazy_plt;
elf = ss->elf;
ehdr = ss->ehdr;
if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL)) return 0;
/* * A symbol from a previous section (e.g. .init) can have been expanded * by symbols__fixup_end() to overlap .plt. Truncate it before adding * a symbol for .plt header.
*/
f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset); if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset)
f->end = shdr_plt.sh_offset;
if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size)) return 0;
/* Add a symbol for .plt header */
plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt"); if (!plt_sym) goto out_elf_end;
symbols__insert(dso__symbols(dso), plt_sym);
/* Only x86 has .plt.got */ if (machine_is_x86(ehdr.e_machine) &&
dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname))) goto out_elf_end;
/* Only x86 has .plt.sec */ if (machine_is_x86(ehdr.e_machine) &&
elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) { if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size)) return 0; /* Extend .plt symbol to entire .plt */
plt_sym->end = plt_sym->start + shdr_plt.sh_size; /* Use .plt.sec offset */
plt_offset = plt_sec_shdr.sh_offset;
lazy_plt = false;
} else {
plt_offset = shdr_plt.sh_offset;
lazy_plt = true;
}
if (shdr_rel_plt.sh_type != SHT_RELA &&
shdr_rel_plt.sh_type != SHT_REL) return 0;
if (!shdr_rel_plt.sh_link) return 0;
if (shdr_rel_plt.sh_link == ss->dynsym_idx) {
scn_dynsym = ss->dynsym;
shdr_dynsym = ss->dynshdr;
} elseif (shdr_rel_plt.sh_link == ss->symtab_idx) { /* * A static executable can have a .plt due to IFUNCs, in which * case .symtab is used not .dynsym.
*/
scn_dynsym = ss->symtab;
shdr_dynsym = ss->symshdr;
} else { goto out_elf_end;
}
if (!scn_dynsym) return 0;
/* * Fetch the relocation section to find the idxes to the GOT * and the symbols in the .dynsym they refer to.
*/
ri.reldata = elf_getdata(scn_plt_rel, NULL); if (!ri.reldata) goto out_elf_end;
syms = elf_getdata(scn_dynsym, NULL); if (syms == NULL) goto out_elf_end;
scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); if (scn_symstrs == NULL) goto out_elf_end;
symstrs = elf_getdata(scn_symstrs, NULL); if (symstrs == NULL) goto out_elf_end;
if (lazy_plt) { /* * Assume a .plt with the same number of entries as the number * of relocation entries is not lazy and does not have a header.
*/ if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size)
dso__delete_symbol(dso, plt_sym); else
plt_offset += plt_header_size;
}
/* * x86 doesn't insert IFUNC relocations in .plt order, so sort to get * back in order.
*/ if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri)) goto out_elf_end;
/*
 * Decide whether symbol values need adjusting for this ELF type.
 *
 * Usually vmlinux is an ELF file with type ET_EXEC for most architectures;
 * except the Arm64 kernel is linked with option '-share', so type ET_DYN
 * must be accepted too.
 */
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	switch (ehdr.e_type) {
	case ET_EXEC:
	case ET_REL:
	case ET_DYN:
		return true;
	default:
		return false;
	}
}
/*
 * Some executable module sections like .noinstr.text might be laid out with
 * .text so they can use the same mapping (memory address to file offset).
 * Check if that is the case. Refer to kernel layout_sections(). Return the
 * maximum offset.
 */
static u64 max_text_section(Elf *elf, GElf_Ehdr *ehdr)
{
	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;
	u64 limit = 0;

	/* Doesn't work for some arch */
	if (ehdr->e_machine == EM_PARISC || ehdr->e_machine == EM_ALPHA)
		return 0;

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ehdr->e_shstrndx), NULL))
		return 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *name;

		if (!gelf_getshdr(scn, &shdr))
			break;

		if (!is_exe_text(shdr.sh_flags))
			continue;

		/* .init and .exit sections are not placed with .text */
		name = elf_strptr(elf, ehdr->e_shstrndx, shdr.sh_name);
		if (!name || strstarts(name, ".init") || strstarts(name, ".exit"))
			break;

		/* Must be next to previous, assumes .text is first */
		if (limit && PERF_ALIGN(limit, shdr.sh_addralign ?: 1) != shdr.sh_offset)
			break;

		limit = shdr.sh_offset + shdr.sh_size;
	}

	return limit;
}
/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}
/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (!kmap || !kmap->ref_reloc_sym)
		return 0;

	if (!kmap->ref_reloc_sym->unrelocated_addr)
		return 0;

	return kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr;
}
/* Adjust symbol to map to file offset */ if (adjust_kernel_syms)
sym->st_value -= shdr->sh_addr - shdr->sh_offset;
if (strcmp(section_name, (dso__short_name(curr_dso) + dso__short_name_len(dso))) == 0) return 0;
if (strcmp(section_name, ".text") == 0) { /* * The initial kernel mapping is based on * kallsyms and identity maps. Overwrite it to * map to the kernel dso.
*/ if (*remap_kernel && dso__kernel(dso) && !kmodule) {
*remap_kernel = false;
map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
map__set_end(map, map__start(map) + shdr->sh_size);
map__set_pgoff(map, shdr->sh_offset);
map__set_mapping_type(map, MAPPING_TYPE__DSO); /* Ensure maps are correctly ordered */ if (kmaps) { int err; struct map *tmp = map__get(map);
/* * The initial module mapping is based on * /proc/modules mapped to offset zero. * Overwrite it to map to the module dso.
*/ if (*remap_kernel && kmodule) {
*remap_kernel = false;
map__set_pgoff(map, shdr->sh_offset);
}
/* * perf does not record module section addresses except for .text, but * some sections can use the same mapping as .text.
*/ if (kmodule && adjust_kernel_syms && is_exe_text(shdr->sh_flags) &&
shdr->sh_offset <= max_text_sh_offset) {
dso__put(*curr_dsop);
*curr_dsop = dso__get(dso); return 0;
}
if (runtime_ss->opdsec)
opddata = elf_rawdata(runtime_ss->opdsec, NULL);
syms = elf_getdata(sec, NULL); if (syms == NULL) goto out_elf_end;
sec = elf_getscn(elf, shdr.sh_link); if (sec == NULL) goto out_elf_end;
symstrs = elf_getdata(sec, NULL); if (symstrs == NULL) goto out_elf_end;
sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx); if (sec_strndx == NULL) goto out_elf_end;
secstrs_run = elf_getdata(sec_strndx, NULL); if (secstrs_run == NULL) goto out_elf_end;
sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); if (sec_strndx == NULL) goto out_elf_end;
secstrs_sym = elf_getdata(sec_strndx, NULL); if (secstrs_sym == NULL) goto out_elf_end;
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
/* * The kernel relocation symbol is needed in advance in order to adjust * kernel maps correctly.
*/ if (ref_reloc_sym_not_found(kmap)) {
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { constchar *elf_name = elf_sym__name(&sym, symstrs);
/* * Handle any relocation of vdso necessary because older kernels * attempted to prelink vdso to its virtual address.
*/ if (dso__is_vdso(dso))
map__set_reloc(map, map__start(map) - dso__text_offset(dso));
dso__set_adjust_symbols(dso, runtime_ss->adjust_symbols || ref_reloc(kmap)); /* * Initial kernel and module mappings do not map to the dso. * Flag the fixups.
*/ if (dso__kernel(dso)) {
remap_kernel = true;
adjust_kernel_syms = dso__adjust_symbols(dso);
}
if (kmodule && adjust_kernel_syms)
max_text_sh_offset = max_text_section(runtime_ss->elf, &runtime_ss->ehdr);
/* * When loading symbols in a data mapping, ABS symbols (which * has a value of SHN_ABS in its st_shndx) failed at * elf_getscn(). And it marks the loading as a failure so * already loaded symbols cannot be fixed up. * * I'm not sure what should be done. Just ignore them for now. * - Namhyung Kim
*/ if (sym.st_shndx == SHN_ABS) continue;
sec = elf_getscn(syms_ss->elf, sym.st_shndx); if (!sec) goto out_elf_end;
gelf_getshdr(sec, &shdr);
/* * If the attribute bit SHF_ALLOC is not set, the section * doesn't occupy memory during process execution. * E.g. ".gnu.warning.*" section is used by linker to generate * warnings when calling deprecated functions, the symbols in * the section aren't loaded to memory during process execution, * so skip them.
*/ if (!(shdr.sh_flags & SHF_ALLOC)) continue;
secstrs = secstrs_sym;
/* * We have to fallback to runtime when syms' section header has * NOBITS set. NOBITS results in file offset (sh_offset) not * being incremented. So sh_offset used below has different * values for syms (invalid) and runtime (valid).
*/ if (shdr.sh_type == SHT_NOBITS) {
sec = elf_getscn(runtime_ss->elf, sym.st_shndx); if (!sec) goto out_elf_end;
if (is_label && !elf_sec__filter(&shdr, secstrs)) continue;
section_name = elf_sec__name(&shdr, secstrs);
/* On ARM, symbols for thumb functions have 1 added to
* the symbol address as a flag - remove it */ if ((ehdr.e_machine == EM_ARM) &&
(GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
(sym.st_value & 1))
--sym.st_value;
/* * For misannotated, zeroed, ASM function sizes.
*/ if (nr > 0) {
symbols__fixup_end(dso__symbols(dso), false);
symbols__fixup_duplicate(dso__symbols(dso)); if (kmap) { /* * We need to fixup this here too because we create new * maps here, for things like vsyscall sections.
*/
maps__fixup_end(kmaps);
}
} return nr;
out_elf_end:
dso__put(curr_dso); return -1;
}
/*
 * Load symbols for @dso from @syms_ss (symtab and/or dynsym), with
 * @runtime_ss supplying runtime section data.  Returns the number of
 * symbols loaded, or a negative error.
 */
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
		  struct symsrc *runtime_ss, int kmodule)
{
	int total = 0;
	int ret = -1;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(dso__symbols(dso));

	if (syms_ss->symtab) {
		ret = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
					     kmodule, 0);
		if (ret < 0)
			return ret;
		total = ret;
	} else if (dso__kernel(dso)) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		return ret;
	}

	if (syms_ss->dynsym) {
		ret = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
					     kmodule, 1);
		if (ret < 0)
			return ret;
		total += ret;
	}

	/*
	 * The .gnu_debugdata is a special situation: it contains a symbol
	 * table, but the runtime file may also contain dynsym entries which are
	 * not present there. We need to load both.
	 */
	if (syms_ss->type == DSO_BINARY_TYPE__GNU_DEBUGDATA && runtime_ss->dynsym) {
		ret = dso__load_sym_internal(dso, map, runtime_ss, runtime_ss,
					     kmodule, 1);
		if (ret < 0)
			return ret;
		total += ret;
	}

	return total;
}
/*
 * Walk the PT_LOAD program headers of @elf and invoke @mapfn for each
 * loadable segment: executable (PF_X) segments when @exe is true, readable
 * (PF_R) segments otherwise.  The mapped size is the smaller of p_memsz and
 * p_filesz; zero-sized segments are skipped.
 *
 * Returns 0 on success, -1 on an ELF error, or the first non-zero value
 * returned by @mapfn.
 */
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;

		if (phdr.p_type != PT_LOAD)
			continue;

		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}

		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;

		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, bool *is_64_bit)
{ int err;
Elf *elf;
/*
 * Copy @len bytes from fd @from at @from_offs to fd @to at @to_offs, in
 * page-sized chunks.  read() is used rather than mmap because mmap won't
 * work on proc files.  A short read (EOF) ends the loop early without error;
 * a short write is an error.
 *
 * Returns 0 on success, -1 on failure.
 */
static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}
/* Handle for an open kcore ELF image. */
struct kcore {
	int fd;		/* open file descriptor for the kcore file */
	int elfclass;	/* presumably ELFCLASS32/ELFCLASS64 — confirm at init site */
	Elf *elf;	/* libelf handle for the descriptor */
	GElf_Ehdr ehdr;	/* cached ELF header */
};
/** * kcore_copy - copy kallsyms, modules and kcore from one directory to another. * @from_dir: from directory * @to_dir: to directory * * This function copies kallsyms, modules and kcore files from one directory to * another. kallsyms and modules are copied entirely. Only code segments are * copied from kcore. It is assumed that two segments suffice: one for the * kernel proper and one for all the modules. The code segments are determined * from kallsyms and modules files. The kernel map starts at _stext or the * lowest function symbol, and ends at _etext or the highest function symbol. * The module map starts at the lowest module address and ends at the highest * module symbol. Start addresses are rounded down to the nearest page. End * addresses are rounded up to the nearest page. An extra page is added to the * highest kernel symbol and highest module symbol to, hopefully, encompass that * symbol too. Because it contains only code sections, the resulting kcore is * unusual. One significant peculiarity is that the mapping (start -> pgoff) * is not the same for the kernel map and the modules map. That happens because * the data is copied adjacently whereas the original kcore has gaps. Finally, * kallsyms file is compared with its copy to check that modules have not been * loaded or unloaded while the copies were taking place. * * Return: %0 on success, %-1 on failure.
*/ int kcore_copy(constchar *from_dir, constchar *to_dir)
{ struct kcore kcore; struct kcore extract; int idx = 0, err = -1;
off_t offset, sz; struct kcore_copy_info kci = { .stext = 0, }; char kcore_filename[PATH_MAX]; char extract_filename[PATH_MAX]; struct phdr_data *p;
/** * populate_sdt_note : Parse raw data and identify SDT note * @elf: elf of the opened file * @data: raw data of a section with description offset applied * @len: note description size * @type: type of the note * @sdt_notes: List to add the SDT note * * Responsible for parsing the @data in section .note.stapsdt in @elf and * if its an SDT note, it appends to @sdt_notes list.
*/ staticint populate_sdt_note(Elf **elf, constchar *data, size_t len, struct list_head *sdt_notes)
{ constchar *provider, *name, *args; struct sdt_note *tmp = NULL;
GElf_Ehdr ehdr;
GElf_Shdr shdr; int ret = -EINVAL;
union {
Elf64_Addr a64[NR_ADDR];
Elf32_Addr a32[NR_ADDR];
} buf;
/*
 * NOTE(review): the following non-code text (a German website disclaimer:
 * "the information on this website was compiled to the best of our
 * knowledge; no guarantee of completeness, correctness or quality is given;
 * the syntax highlighting and measurement are still experimental") appears
 * to be extraction residue appended to this file by mistake and is not part
 * of the source. It should be removed.
 */