/* We don't support the memory-mapped interface. */ return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
arch >= ARM_DEBUG_ARCH_V7_1;
}
/* Can we determine the watchpoint access type from the fsr? */ staticint debug_exception_updates_fsr(void)
{ return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
}
/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	/*
	 * Mismatch breakpoints require v7 debug and at least two breakpoint
	 * registers: one is reserved for the mismatch itself (see
	 * get_num_brps()).
	 */
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}
/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	return get_num_wrp_resources();
}
/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();

	/* One BRP is reserved for mismatch (single-step) use if supported. */
	return core_has_mismatch_brps() ? brps - 1 : brps;
}
/*
 * In order to access the breakpoint/watchpoint control registers, * we must
 * be running in debug monitor mode. Unfortunately, we can be put into
 * halting debug mode at any time by an external debugger but there is
 * nothing we can do to prevent that.
 */
static int monitor_mode_enabled(void)
{
	u32 dscr;

	/* DBGDSCR.MDBGen indicates monitor debug mode is enabled. */
	ARM_DBG_READ(c0, c1, 0, dscr);
	return !!(dscr & ARM_DSCR_MDBGEN);
}
/* If monitor mode is already enabled, just return. */ if (dscr & ARM_DSCR_MDBGEN) goto out;
/* Write to the corresponding DSCR. */ switch (get_debug_arch()) { case ARM_DEBUG_ARCH_V6: case ARM_DEBUG_ARCH_V6_1:
ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN)); break; case ARM_DEBUG_ARCH_V7_ECP14: case ARM_DEBUG_ARCH_V7_1: case ARM_DEBUG_ARCH_V8: case ARM_DEBUG_ARCH_V8_1: case ARM_DEBUG_ARCH_V8_2: case ARM_DEBUG_ARCH_V8_4:
ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
isb(); break; default: return -ENODEV;
}
/* Check that the write made it through. */
ARM_DBG_READ(c0, c1, 0, dscr); if (!(dscr & ARM_DSCR_MDBGEN)) {
pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
smp_processor_id()); return -EPERM;
}
out: return 0;
}
int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}
/* * Check if 8-bit byte-address select is available. * This clobbers WRP 0.
*/ static u8 get_max_wp_len(void)
{
u32 ctrl_reg; struct arch_hw_breakpoint_ctrl ctrl;
u8 size = 4;
if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14) goto out;
/* Remove the breakpoint. */ for (i = 0; i < max_slots; ++i) {
slot = &slots[i];
if (*slot == bp) {
*slot = NULL; break;
}
}
if (i == max_slots) {
pr_warn("Can't find any breakpoint slot\n"); return;
}
/* Ensure that we disable the mismatch breakpoint. */ if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
info->step_ctrl.enabled) {
i = 0;
base = ARM_BASE_BCR + core_num_brps;
}
/* Reset the control register. */
write_wb_reg(base + i, 0);
}
/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 *
 * Returns 0 on success or -EINVAL if either field has no generic equivalent.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* * Construct an arch_hw_breakpoint from a perf_event.
*/ staticint arch_build_bp_info(struct perf_event *bp, conststruct perf_event_attr *attr, struct arch_hw_breakpoint *hw)
{ /* Type */ switch (attr->bp_type) { case HW_BREAKPOINT_X:
hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R:
hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W:
hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW:
hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL;
}
/* Len */ switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1:
hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2:
hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4:
hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8:
hw->ctrl.len = ARM_BREAKPOINT_LEN_8; if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8) break;
fallthrough; default: return -EINVAL;
}
/* * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes. * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported * by the hardware and must be aligned to the appropriate number of * bytes.
*/ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL;
if (is_default_overflow_handler(bp)) { /* * Mismatch breakpoints are required for single-stepping * breakpoints.
*/ if (!core_has_mismatch_brps()) return -EINVAL;
/* We don't allow mismatch breakpoints in kernel space. */ if (arch_check_bp_in_kernelspace(hw)) return -EPERM;
/* * Per-cpu breakpoints are not supported by our stepping * mechanism.
*/ if (!bp->hw.target) return -EINVAL;
/* * We only support specific access types if the fsr * reports them.
*/ if (!debug_exception_updates_fsr() &&
(hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
hw->ctrl.type == ARM_BREAKPOINT_STORE)) return -EINVAL;
}
out: return ret;
}
/* * Enable/disable single-stepping over the breakpoint bp at address addr.
*/ staticvoid enable_single_step(struct perf_event *bp, u32 addr)
{ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
/*
 * Arm32 hardware does not always report a watchpoint hit address that matches
 * one of the watchpoints set. It can also report an address "near" the
 * watchpoint if a single instruction accesses both watched and unwatched
 * addresses. There is no straight-forward way, short of disassembling the
 * offending instruction, to map that address back to the watchpoint. This
 * function computes the distance of the memory access from the watchpoint as a
 * heuristic for the likelihood that a given access triggered the watchpoint.
 *
 * See this same function in the arm64 platform code, which has the same
 * problem.
 *
 * The function returns the distance of the address from the bytes watched by
 * the watchpoint. In case of an exact match, it returns 0.
 */
static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
					struct arch_hw_breakpoint_ctrl *ctrl)
{
	u32 wp_low, wp_high;
	u32 lens, lene;

	/*
	 * ctrl->len is a byte-address-select bitmask: the lowest and highest
	 * set bits give the byte offsets of the watched range from val.
	 */
	lens = __ffs(ctrl->len);
	lene = __fls(ctrl->len);

	wp_low = val + lens;
	wp_high = val + lene;
	if (addr < wp_low)
		return wp_low - addr;
	else if (addr > wp_high)
		return addr - wp_high;
	else
		return 0;
}
/* * Find all watchpoints that match the reported address. If no exact * match is found. Attribute the hit to the closest watchpoint.
*/
rcu_read_lock(); for (i = 0; i < core_num_wrps; ++i) {
wp = slots[i]; if (wp == NULL) continue;
/* * The DFAR is an unknown value on debug architectures prior * to 7.1. Since we only allow a single watchpoint on these * older CPUs, we can set the trigger to the lowest possible * faulting address.
*/ if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
BUG_ON(i > 0);
info = counter_arch_bp(wp);
info->trigger = wp->attr.bp_addr;
} else { /* Check that the access type matches. */ if (debug_exception_updates_fsr()) {
access = (fsr & ARM_FSR_ACCESS_MASK) ?
HW_BREAKPOINT_W : HW_BREAKPOINT_R; if (!(access & hw_breakpoint_type(wp))) continue;
}
val = read_wb_reg(ARM_BASE_WVR + i);
ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
decode_ctrl_reg(ctrl_reg, &ctrl);
dist = get_distance_from_watchpoint(addr, val, &ctrl); if (dist < min_dist) {
min_dist = dist;
closest_match = i;
} /* Is this an exact match? */ if (dist != 0) continue;
/* We have a winner. */
info = counter_arch_bp(wp);
info->trigger = addr;
}
/* * If we triggered a user watchpoint from a uaccess routine, * then handle the stepping ourselves since userspace really * can't help us with this.
*/ if (watchpoint_fault_on_uaccess(regs, info)) goto step;
perf_bp_event(wp, regs);
/* * Defer stepping to the overflow handler if one is installed. * Otherwise, insert a temporary mismatch breakpoint so that * we can single-step over the watchpoint trigger.
*/ if (!is_default_overflow_handler(wp)) continue;
step:
enable_single_step(wp, instruction_pointer(regs));
}
if (min_dist > 0 && min_dist != -1) { /* No exact match found. */
wp = slots[closest_match];
info = counter_arch_bp(wp);
info->trigger = addr;
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
perf_bp_event(wp, regs); if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
}
/* The exception entry code places the amended lr in the PC. */
addr = regs->ARM_pc;
/* Check the currently installed breakpoints first. */ for (i = 0; i < core_num_brps; ++i) {
rcu_read_lock();
bp = slots[i];
if (bp == NULL) goto unlock;
info = counter_arch_bp(bp);
/* Check if the breakpoint value matches. */
val = read_wb_reg(ARM_BASE_BVR + i); if (val != (addr & ~0x3)) goto mismatch;
/* Possible match, check the byte address select to confirm. */
ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
decode_ctrl_reg(ctrl_reg, &ctrl); if ((1 << (addr & 0x3)) & ctrl.len) {
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs); if (is_default_overflow_handler(bp))
enable_single_step(bp, addr); goto unlock;
}
mismatch: /* If we're stepping a breakpoint, it can now be restored. */ if (info->step_ctrl.enabled)
disable_single_step(bp);
unlock:
rcu_read_unlock();
}
/* Handle any pending watchpoint single-step breakpoints. */
watchpoint_single_step_handler(addr);
}
#ifdef CONFIG_CFI_CLANG staticvoid hw_breakpoint_cfi_handler(struct pt_regs *regs)
{ /* * TODO: implementing target and type to pass to CFI using the more * elaborate report_cfi_failure() requires compiler work. To be able * to properly extract target information the compiler needs to * emit a stable instructions sequence for the CFI checks so we can * decode the instructions preceding the trap and figure out which * registers were used.
*/
/* * Called from either the Data Abort Handler [watchpoint] or the * Prefetch Abort Handler [breakpoint] with interrupts disabled.
*/ staticint hw_breakpoint_pending(unsignedlong addr, unsignedint fsr, struct pt_regs *regs)
{ int ret = 0;
u32 dscr;
preempt_disable();
if (interrupts_enabled(regs))
local_irq_enable();
/* We only handle watchpoints and hardware breakpoints. */
ARM_DBG_READ(c0, c1, 0, dscr);
/* Perform perf callbacks. */ switch (ARM_DSCR_MOE(dscr)) { case ARM_ENTRY_BREAKPOINT:
breakpoint_handler(addr, regs); break; case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
fallthrough; case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs); break; case ARM_ENTRY_CFI_BREAKPOINT:
hw_breakpoint_cfi_handler(regs); break; default:
ret = 1; /* Unhandled fault. */
}
staticint debug_reg_trap(struct pt_regs *regs, unsignedint instr)
{ int cpu = smp_processor_id();
pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
instr, cpu);
/* Set the error flag for this CPU and skip the faulting instruction. */
cpumask_set_cpu(cpu, &debug_err_mask);
instruction_pointer(regs) += 4; return 0;
}
/* Does this core support OS Save and Restore? */ staticbool core_has_os_save_restore(void)
{
u32 oslsr;
switch (get_debug_arch()) { case ARM_DEBUG_ARCH_V7_1: returntrue; case ARM_DEBUG_ARCH_V7_ECP14: #ifdef CONFIG_ARM_ERRATA_764319
oslsr_fault = 0;
register_undef_hook(&debug_oslsr_hook);
ARM_DBG_READ(c1, c1, 4, oslsr);
unregister_undef_hook(&debug_oslsr_hook); if (oslsr_fault) returnfalse; #else
ARM_DBG_READ(c1, c1, 4, oslsr); #endif if (oslsr & ARM_OSLSR_OSLM0) returntrue;
fallthrough; default: returnfalse;
}
}
staticvoid reset_ctrl_regs(unsignedint cpu)
{ int i, raw_num_brps, err = 0;
u32 val;
/* * v7 debug contains save and restore registers so that debug state * can be maintained across low-power modes without leaving the debug * logic powered up. It is IMPLEMENTATION DEFINED whether we can access * the debug registers out of reset, so we must unlock the OS Lock * Access Register to avoid taking undefined instruction exceptions * later on.
*/ switch (debug_arch) { case ARM_DEBUG_ARCH_V6: case ARM_DEBUG_ARCH_V6_1: /* ARMv6 cores clear the registers out of reset. */ goto out_mdbgen; case ARM_DEBUG_ARCH_V7_ECP14: /* * Ensure sticky power-down is clear (i.e. debug logic is * powered up).
*/
ARM_DBG_READ(c1, c5, 4, val); if ((val & 0x1) == 0)
err = -EPERM;
if (!has_ossr) goto clear_vcr; break; case ARM_DEBUG_ARCH_V7_1: /* * Ensure the OS double lock is clear.
*/
ARM_DBG_READ(c1, c3, 4, val); if ((val & 0x1) == 1)
err = -EPERM; break;
}
if (err) {
pr_warn_once("CPU %d debug is powered down!\n", cpu);
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return;
}
/* * Unconditionally clear the OS lock by writing a value * other than CS_LAR_KEY to the access register.
*/
ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
isb();
/* * Clear any configured vector-catch events before * enabling monitor mode.
*/
clear_vcr:
ARM_DBG_WRITE(c0, c7, 0, 0);
isb();
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return;
}
/* * The control/value register pairs are UNKNOWN out of reset so * clear them to avoid spurious debug events.
*/
raw_num_brps = get_num_brp_resources(); for (i = 0; i < raw_num_brps; ++i) {
write_wb_reg(ARM_BASE_BCR + i, 0UL);
write_wb_reg(ARM_BASE_BVR + i, 0UL);
}
for (i = 0; i < core_num_wrps; ++i) {
write_wb_reg(ARM_BASE_WCR + i, 0UL);
write_wb_reg(ARM_BASE_WVR + i, 0UL);
}
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); return;
}
/* * Have a crack at enabling monitor mode. We don't actually need * it yet, but reporting an error early is useful if it fails.
*/
out_mdbgen: if (enable_monitor_mode())
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}
/* * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD * whenever a WFI is issued, even if the core is not powered down, in * violation of the architecture. When DBGPRSR.SPD is set, accesses to * breakpoint and watchpoint registers are treated as undefined, so * this results in boot time and runtime failures when these are * accessed and we unexpectedly take a trap. * * It's not clear if/how this can be worked around, so we blacklist * Scorpion CPUs to avoid these issues.
*/ if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); return 0;
}
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
/* * We need to tread carefully here because DBGSWENABLE may be * driven low on this core and there isn't an architected way to * determine that.
*/
cpus_read_lock();
register_undef_hook(&debug_reg_hook);
/* * Register CPU notifier which resets the breakpoint resources. We * assume that a halting debugger will leave the world in a nice state * for us.
*/
ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
dbg_reset_online, NULL);
unregister_undef_hook(&debug_reg_hook); if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
core_num_brps = 0;
core_num_wrps = 0; if (ret > 0)
cpuhp_remove_state_nocalls_cpuslocked(ret);
cpus_read_unlock(); return 0;
}
/* Work out the maximum supported watchpoint length. */
max_watchpoint_len = get_max_wp_len();
pr_info("maximum watchpoint size is %u bytes.\n",
max_watchpoint_len);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.