/* * The ELF psABI for AArch64 documents the 16-bit and 32-bit place * relative and absolute relocations as having a range of [-2^15, 2^16) * or [-2^31, 2^32), respectively. However, in order to be able to * detect overflows reliably, we have to choose whether we interpret * such quantities as signed or as unsigned, and stick with it. * The way we organize our address space requires a signed * interpretation of 32-bit relative references, so let's use that * for all R_AARCH64_PRELxx relocations. This means our upper * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
 */
/*
 * NOTE(review): tail of reloc_data(); the function signature is above this
 * view.  The value is stored first and range-checked afterwards, so a caller
 * that chooses to ignore -ERANGE (cf. the overflow_check flag in the
 * relocation loop below) still gets the truncated store — presumably
 * intentional; confirm against the full function.
 */
switch (len) { case 16:
/* ABS is checked as unsigned [0, U16_MAX]; PREL as signed [S16_MIN, S16_MAX]. */
WRITE_PLACE((s16 *)place, sval, me); switch (op) { case RELOC_OP_ABS: if (sval < 0 || sval > U16_MAX) return -ERANGE; break; case RELOC_OP_PREL: if (sval < S16_MIN || sval > S16_MAX) return -ERANGE; break; default:
pr_err("Invalid 16-bit data relocation (%d)\n", op); return 0;
} break; case 32:
/* Same signed/unsigned policy at 32 bits. */
WRITE_PLACE((s32 *)place, sval, me); switch (op) { case RELOC_OP_ABS: if (sval < 0 || sval > U32_MAX) return -ERANGE; break; case RELOC_OP_PREL: if (sval < S32_MIN || sval > S32_MAX) return -ERANGE; break; default:
pr_err("Invalid 32-bit data relocation (%d)\n", op); return 0;
} break; case 64:
/* A 64-bit field cannot overflow a 64-bit value: no range check needed. */
WRITE_PLACE((s64 *)place, sval, me); break; default:
pr_err("Invalid length (%d) for data relocation\n", len); return 0;
} return 0;
}
/*
 * NOTE(review): tail of reloc_insn_movw(); the immediate extraction, shift
 * handling, and range logic precede this view.
 */
if (imm_type == AARCH64_INSN_IMM_MOVNZ) { /* * For signed MOVW relocations, we have to manipulate the * instruction encoding depending on whether or not the * immediate is less than zero.
 */
/* Clear the opcode field (bits [30:29]) before selecting MOVZ vs. MOVN. */
insn &= ~(3 << 29); if (sval >= 0) { /* >=0: Set the instruction to MOVZ (opcode 10b). */
insn |= 2 << 29;
} else { /* * <0: Set the instruction to MOVN (opcode 00b). * Since we've masked the opcode already, we * don't need to do anything other than * inverting the new immediate field.
 */
imm = ~imm;
}
}
/* Update the instruction with the new encoding. */
insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
WRITE_PLACE(place, cpu_to_le32(insn), me);
/*
 * NOTE(review): fragment of reloc_insn_adrp(); the signature and final
 * return are outside this view.  The "forbidden offset" check presumably
 * implements the Cortex-A53 erratum 843419 workaround — confirm against
 * is_forbidden_offset_for_adrp().
 */
/* Safe placement: handle the ADRP as an ordinary page-relative immediate. */
if (!is_forbidden_offset_for_adrp(place)) return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
AARCH64_INSN_IMM_ADR, me);
/* patch ADRP to ADR if it is in range */ if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
AARCH64_INSN_IMM_ADR, me)) {
/* Clearing bit 31 converts the ADRP encoding into ADR. */
insn = le32_to_cpu(*place);
insn &= ~BIT(31);
} else { /* out of range for ADR -> emit a veneer */
val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff); if (!val) return -ENOEXEC;
/* Replace the ADRP with an unconditional branch to the veneer. */
insn = aarch64_insn_gen_branch_imm((u64)place, val,
AARCH64_INSN_BRANCH_NOLINK);
}
/*
 * NOTE(review): fragment of the relocation loop in apply_relocate_add();
 * the function head, the remaining relocation cases, and the loop/switch
 * closing braces lie outside this view.  Cases marked overflow_check=false
 * deliberately tolerate -ERANGE from the helpers (e.g. _NC "no check"
 * MOVW group relocations, and full-width 64-bit fields).
 */
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* loc corresponds to P in the AArch64 ELF document. */
loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* sym is the ELF symbol we're referring to. */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
/* val corresponds to (S + A) in the AArch64 ELF document. */
val = sym->st_value + rel[i].r_addend;
/* Check for overflow by default. */
overflow_check = true;
/* Perform the static relocation. */ switch (ELF64_R_TYPE(rel[i].r_info)) { /* Null relocations. */ case R_ARM_NONE: case R_AARCH64_NONE:
ovf = 0; break;
/* Data relocations. */ case R_AARCH64_ABS64:
overflow_check = false;
ovf = reloc_data(RELOC_OP_ABS, loc, val, 64, me); break; case R_AARCH64_ABS32:
ovf = reloc_data(RELOC_OP_ABS, loc, val, 32, me); break; case R_AARCH64_ABS16:
ovf = reloc_data(RELOC_OP_ABS, loc, val, 16, me); break; case R_AARCH64_PREL64:
overflow_check = false;
ovf = reloc_data(RELOC_OP_PREL, loc, val, 64, me); break; case R_AARCH64_PREL32:
ovf = reloc_data(RELOC_OP_PREL, loc, val, 32, me); break; case R_AARCH64_PREL16:
ovf = reloc_data(RELOC_OP_PREL, loc, val, 16, me); break;
/* MOVW instruction relocations. */ case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
fallthrough; case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
fallthrough; case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
fallthrough; case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_UABS_G3: /* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVNZ, me); break; case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVNZ, me); break; case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVNZ, me); break; case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
AARCH64_INSN_IMM_MOVNZ, me); break; case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
AARCH64_INSN_IMM_MOVNZ, me); break; case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ, me); break; case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
AARCH64_INSN_IMM_MOVNZ, me); break; case R_AARCH64_MOVW_PREL_G3: /* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
AARCH64_INSN_IMM_MOVNZ, me); break;
/*
 * NOTE(review): tail of the ftrace-PLT setup helper; its signature and the
 * #ifdef matched by the #endif below are above this view.  -ENOEXEC is
 * returned if either trampoline section is missing from the module image.
 */
/* Trampoline backing calls from regular (.text) module code. */
s = find_section(hdr, sechdrs, ".text.ftrace_trampoline"); if (!s) return -ENOEXEC;
plts = (void *)s->sh_addr;
__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
mod->arch.ftrace_trampolines = plts;
/* Separate trampoline for __init code — presumably freed with the other
 * .init sections after module initialisation; confirm against callers. */
s = find_section(hdr, sechdrs, ".init.text.ftrace_trampoline"); if (!s) return -ENOEXEC;
plts = (void *)s->sh_addr;
__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
mod->arch.init_ftrace_trampolines = plts;
#endif return 0;
}
/*
 * module_finalize - arch-specific fix-ups run after a module's sections are
 * loaded and relocated.
 * NOTE(review): the function continues past this view; its final return
 * statement is not visible here.
 */
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me)
{ const Elf_Shdr *s; int ret;
/* Apply any alternative-instruction patches the module carries. */
s = find_section(hdr, sechdrs, ".altinstructions"); if (s)
apply_alternatives_module((void *)s->sh_addr, s->sh_size);
/* Dynamic shadow-call-stack patching of .init.eh_frame; a failure here is
 * only logged, not propagated from this branch. */
if (scs_is_dynamic()) {
s = find_section(hdr, sechdrs, ".init.eh_frame"); if (s) {
ret = __pi_scs_patch((void *)s->sh_addr, s->sh_size); if (ret)
pr_err("module %s: error occurred during dynamic SCS patching (%d)\n",
me->name, ret);
}
}
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.13Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.