/*
 * Prepare insn slot. Mark Rutland points out it depends on a couple of
 * subtleties:
 *
 * - That the I-cache maintenance for these instructions is complete
 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
 *   ensures this, but just omits causing a Context-Synchronization-Event
 *   on all CPUS).
 *
 * - That the kprobe BRK results in an exception (and consequently a
 *   Context-Synchronization-Event), which ensures that the CPU will
 *   fetch the single-step slot instructions *after* this, ensuring that
 *   the new instructions are used.
 *
 * It supposes to place ISB after patching to guarantee I-cache maintenance
 * is observed on all CPUS, however, single-step slot is installed in
 * the BRK exception handler, so it is unnecessary to generate
 * Context-Synchronization-Event via ISB again.
 */
aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode));
aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);
/* * Needs restoring of return address after stepping xol.
*/
p->ainsn.xol_restore = (unsignedlong) p->addr + sizeof(kprobe_opcode_t);
}
/*
 * Prepare a probed instruction that will be simulated rather than executed
 * out-of-line: no PC fixup is needed afterwards.
 */
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out-of-line (xol). No need to adjust the PC. */
	p->ainsn.xol_restore = 0;
}
/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
 * the kprobe state is per-CPU and doesn't get migrated.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	/* Save the caller's DAIF bits so they can be restored after single-step. */
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	/* Mask Debug, SError, IRQ and FIQ for the out-of-line step. */
	regs->pstate |= DAIF_MASK;
}
switch (kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the ip points back to the probe address * and allow the page fault handler to continue as a * normal page fault.
*/
instruction_pointer_set(regs, (unsignedlong) cur->addr);
BUG_ON(!instruction_pointer(regs));
p = get_kprobe((kprobe_opcode_t *) addr); if (WARN_ON_ONCE(!p)) { /* * Something went wrong. This BRK used an immediate reserved * for kprobes, but we couldn't find any corresponding probe.
*/ return DBG_HOOK_ERROR;
}
if (cur_kprobe) { /* Hit a kprobe inside another kprobe */ if (!reenter_kprobe(p, regs, kcb)) return DBG_HOOK_ERROR;
} else { /* Probe hit */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a * pre-handler and it returned non-zero, it will * modify the execution path and not need to single-step * Let's just reset current kprobe and exit.
*/ if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0); else
reset_current_kprobe();
}
/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	/* When the kernel runs at EL2 (VHE), the hyp idmap range stays probeable. */
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	return kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					 (unsigned long)__hyp_idmap_text_end);
}
/*
 * NOTE(review): the following German text is website-disclaimer boilerplate
 * that leaked into this file during extraction — it is not source code and
 * should be removed. Preserved verbatim for reference:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 *  Richtigkeit, noch Qualität der bereit gestellten Informationen
 *  zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die Messung
 *  sind noch experimentell."
 */