static bool plt_entries_equal(const struct plt_entry *a,
			      const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	/*
	 * Otherwise, compare the target pages directly: the entries are
	 * equal if their ADRP instructions compute the same page address.
	 */
	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}
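/*
 * Standalone userspace sketch (not kernel code): shows the arithmetic behind
 * the fallback comparison above, mirroring what aarch64_insn_adrp_get_offset()
 * does in the kernel. An ADRP instruction encodes a signed 21-bit page offset
 * in immhi (bits [23:5]) and immlo (bits [30:29]); the target page is
 * ALIGN_DOWN(pc, 4K) plus that offset * 4096. The instruction word and
 * addresses below are made up for illustration, and demonstrate why identical
 * adrp opcodes only imply the same target when both entries share a 4K page.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t adrp_page_offset(uint32_t insn)
{
	uint64_t immlo = (insn >> 29) & 0x3;
	uint64_t immhi = (insn >> 5) & 0x7ffff;

	/* sign-extend the 21-bit immediate, then scale by the page size */
	return ((int64_t)(((immhi << 2) | immlo) << 43) >> 43) * 4096;
}

int main(void)
{
	/* adrp x16 with immhi = 2, immlo = 0: offset of 8 pages (0x8000) */
	uint32_t insn = 0x90000010 | (2u << 5);
	uint64_t pc_a = 0x10000008;	/* page 0x10000000 */
	uint64_t pc_b = 0x10001ffc;	/* page 0x10001000: a different page */

	printf("target page via a: %#llx\n",
	       (unsigned long long)((pc_a & ~0xfffULL) + adrp_page_offset(insn)));
	printf("target page via b: %#llx\n",
	       (unsigned long long)((pc_b & ~0xfffULL) + adrp_page_offset(insn)));
	return 0;
}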
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !within_module_init((unsigned long)loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}
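/*
 * Standalone userspace sketch (not kernel code): mimics the offset test used
 * for erratum 843419, assuming an ADRP placed at 0xff8 or 0xffc modulo 4 KB
 * is vulnerable. It also shows why plt_num_entries grows by i - j above:
 * normally 1, but 2 when is_forbidden_offset_for_adrp() forced a slot to be
 * skipped. The 12-byte entry size matches struct plt_entry (three A64
 * instructions); the base address is made up.
 */
#include <stdint.h>
#include <stdio.h>

static int forbidden(uint64_t addr_of_adrp)
{
	return (addr_of_adrp & 0xfff) >= 0xff8;
}

int main(void)
{
	uint64_t base = 0x10000000;	/* pretend the PLT section starts here */
	int i, skipped = 0;

	for (i = 0; i < 1024; i++) {
		uint64_t adrp_addr = base + 12ULL * i; /* adrp is the first insn */

		if (forbidden(adrp_addr)) {
			printf("entry %d: adrp at page offset %#llx -> skip slot (i - j == 2)\n",
			       i, (unsigned long long)(adrp_addr & 0xfff));
			skipped++;
		}
	}
	printf("%d of 1024 slots would be skipped\n", skipped);
	return 0;
}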
static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);

	return i;
}
static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}
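/*
 * Standalone userspace sketch (not kernel code): the same sort-then-compare-
 * neighbours idea using libc qsort(). my_cmp_3way mirrors the kernel's
 * cmp_3way macro; the relocation values are made up. Of the three CALL26
 * relocations below, two target the same symbol with the same addend, so
 * only two PLT entries are needed.
 */
#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

#define my_cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int my_cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	i = my_cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = my_cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = my_cmp_3way(x->r_addend, y->r_addend);
	return i;
}

int main(void)
{
	Elf64_Rela r[] = {
		{ .r_info = ELF64_R_INFO(7, R_AARCH64_CALL26), .r_addend = 0 },
		{ .r_info = ELF64_R_INFO(3, R_AARCH64_CALL26), .r_addend = 0 },
		{ .r_info = ELF64_R_INFO(7, R_AARCH64_CALL26), .r_addend = 0 },
	};
	int n = 3, i, plts = 0;

	qsort(r, n, sizeof(r[0]), my_cmp_rela);
	/* after sorting, a duplicate can only sit in the preceding slot */
	for (i = 0; i < n; i++)
		if (i == 0 || my_cmp_rela(&r[i], &r[i - 1]) != 0)
			plts++;
	printf("%d PLT entries needed for %d branch relocations\n", plts, n);
	return 0;
}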
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xff8 and != 0xffc (which
			 * would have all ones in bits [11:3]). A worked
			 * example follows this function.
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
	}

	return ret;
}
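/*
 * Standalone userspace sketch (not kernel code): a worked example of the
 * min_align formula above. ffz() returns the index of the least significant
 * zero bit; OR-ing the offset with 0x7 makes bits [2:0] ones so the search
 * starts at bit 3. Aligning the section to 2^(b+1), where b is that zero
 * bit, pins address bit b to zero, so bits [11:3] of the address can never
 * be all ones (i.e. never 0xff8 or 0xffc mod 4 KB). When bits [11:3] of the
 * offset itself are all ones, min_align exceeds 4 KB and a veneer slot is
 * reserved instead. The offsets below are made up.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int my_ffz(uint64_t x)
{
	return __builtin_ctzll(~x);	/* index of lowest 0 bit; x must not be ~0 */
}

int main(void)
{
	uint64_t offs[] = { 0x0010, 0x0ff8, 0x0ffc, 0x1ff8 };
	int i;

	for (i = 0; i < 4; i++) {
		uint64_t min_align = 2ULL << my_ffz(offs[i] | 0x7);

		printf("offset %#6llx -> min_align %#7llx%s\n",
		       (unsigned long long)offs[i],
		       (unsigned long long)min_align,
		       min_align > 0x1000 ? "  (> 4K: reserve a veneer slot)" : "");
	}
	return 0;
}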
/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}
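/*
 * Standalone userspace sketch (not kernel code): the same two-index
 * partitioning scheme on an int array, with "is odd" standing in for
 * branch_rela_needs_plt(). Elements satisfying the predicate end up grouped
 * at the front, and the returned index is used as the sort bound.
 */
#include <stdio.h>

static int is_odd(int v) { return v & 1; }

static int partition(int *a, int n)
{
	int i = 0, j = n - 1, t;

	while (i < j) {
		if (is_odd(a[i]))
			i++;			/* already in the front group */
		else if (is_odd(a[j])) {	/* pull a match forward */
			t = a[i]; a[i] = a[j]; a[j] = t;
		} else
			j--;			/* both fail: shrink from the back */
	}
	return i;
}

int main(void)
{
	int a[] = { 2, 7, 4, 1, 8, 5, 6 }, n = 7, i;
	int nents = partition(a, n);

	for (i = 0; i < n; i++)
		printf("%d ", a[i]);
	printf("\nnents = %d\n", nents);	/* 5 7 1 grouped at the front */
	return 0;
}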
	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * sort branch relocations requiring a PLT by type, symbol
		 * index and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
	}
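/*
 * Standalone userspace sketch (not kernel code): applies the same two
 * filters as the loop above -- SHT_RELA sections whose sh_info points at an
 * SHF_EXECINSTR section -- to an ELF object named on the command line
 * (e.g. a .ko file). Error handling is minimal and the default file name is
 * made up.
 */
#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "module.ko", "rb");
	Elf64_Ehdr eh;
	Elf64_Shdr *sh;
	int i;

	if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	sh = calloc(eh.e_shnum, sizeof(*sh));
	if (!sh || fseek(f, eh.e_shoff, SEEK_SET))
		return 1;
	if (fread(sh, sizeof(*sh), eh.e_shnum, f) != eh.e_shnum)
		return 1;

	for (i = 0; i < eh.e_shnum; i++) {
		if (sh[i].sh_type != SHT_RELA)
			continue;
		/* sh_info names the section the relocations apply to */
		if (!(sh[sh[i].sh_info].sh_flags & SHF_EXECINSTR))
			continue;
		printf("section %d: %zu RELA entries against an exec section\n",
		       i, (size_t)(sh[i].sh_size / sizeof(Elf64_Rela)));
	}
	fclose(f);
	return 0;
}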