/*
 * Report whether the alternative for capability @cpucap has been applied.
 * Out-of-range capability numbers trigger a warning and report false.
 */
bool alternative_is_applied(u16 cpucap)
{
	if (WARN_ON(cpucap >= ARM64_NCAPS))
		return false;

	return test_bit(cpucap, applied_alternatives);
}
/*
 * Check if the target PC is within an alternative block.
 */
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);

	/*
	 * A branch whose target lies inside the replacement sequence itself
	 * does not need its offset rewritten when the sequence is copied.
	 */
	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}
/* Round @x down to the nearest multiple of @a (@a must be a power of two). */
#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
if (aarch64_insn_is_branch_imm(insn)) {
s32 offset = aarch64_get_branch_offset(insn); unsignedlong target;
target = (unsignedlong)altinsnptr + offset;
/* * If we're branching inside the alternate sequence, * do not rewrite the instruction, as it is already * correct. Otherwise, generate the new instruction.
*/ if (branch_insn_requires_update(alt, target)) {
offset = target - (unsignedlong)insnptr;
insn = aarch64_set_branch_offset(insn, offset);
}
} elseif (aarch64_insn_is_adrp(insn)) {
s32 orig_offset, new_offset; unsignedlong target;
/* * If we're replacing an adrp instruction, which uses PC-relative * immediate addressing, adjust the offset to reflect the new * PC. adrp operates on 4K aligned addresses.
*/
orig_offset = aarch64_insn_adrp_get_offset(insn);
target = align_down(altinsnptr, SZ_4K) + orig_offset;
new_offset = target - align_down(insnptr, SZ_4K);
insn = aarch64_insn_adrp_set_offset(insn, new_offset);
} elseif (aarch64_insn_uses_literal(insn)) { /* * Disallow patching unhandled instructions using PC relative * literal addresses
*/
BUG();
}
return insn;
}
/*
 * Iterate over @nr_inst instruction slots, reading from the replacement
 * area attached to @alt (via ALT_REPL_PTR).
 *
 * NOTE(review): this definition is truncated in the visible chunk — the
 * loop body that presumably fixes up and stores each instruction into
 * @updptr, and the closing braces, lie beyond it. Only comments are
 * added here; verify against the original source.
 */
static noinstr void patch_alternative(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
__le32 *replptr; int i;
replptr = ALT_REPL_PTR(alt); for (i = 0; i < nr_inst; i++) {
u32 insn;
/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 *
 * Cleans+invalidates [start, end) to the PoC, one D-cache line at a time,
 * with the line size taken from CTR_EL0's DminLine field.
 */
static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = arm64_ftr_reg_ctrel0.sys_val;
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_EL0_DminLine_SHIFT);
	/* Align the starting address down to a cache-line boundary. */
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}
/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 *
 * NOTE(review): this chunk appears garbled — the else-branch below calls
 * stop_machine() on this very function, which looks like text merged in
 * from a separate caller, and the function's return statement / closing
 * brace are not visible. Only comments are added here; verify against
 * the original source before relying on this code.
 */
staticint __init __apply_alternatives_multi_stop(void *unused)
{ /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { while (!all_alternatives_applied)
cpu_relax();
isb();
} else {
DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
apply_alternatives_vdso(); /* better not try code patching on a live SMP system */
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 *
 * NOTE(review): the body is truncated in this chunk — only the boot-CPU
 * sanity check is visible; the actual patching work lies beyond it.
 */
void __init apply_boot_alternatives(void)
{ /* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.