/*
 * Bits definition for EDDEVID1:PSCROffset
 *
 * NOTE: armv8 and armv7 have different definitions for the register,
 * so consolidate the bits definition as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state, we
 *          rely on EDDEVID to check if EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 */
#define EDDEVID1_PCSR_OFFSET_MASK		GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET		(0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)
/* Make sure the registers are unlocked before accessing */
wmb();
}
/*
 * According to ARM DDI 0487A.k, before accessing external debug
 * registers we should first check the access permission; if any
 * of the conditions below is met then we cannot access the debug
 * registers, to avoid a lockup issue:
 *
 *   - CPU power domain is powered off;
 *   - The OS Double Lock is locked;
 *
 * Both conditions can be detected by reading EDPRSR.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
	/* CPU is powered off */
	if (!(drvdata->edprsr & EDPRSR_PU))
		return false;

	/* The OS Double Lock is locked */
	if (drvdata->edprsr & EDPRSR_DLK)
		return false;

	/* Neither blocking condition holds: access is permitted */
	return true;
}
/*
 * NOTE(review): fragment — the enclosing function header and the
 * declaration of the local 'edprcr' are outside this chunk; confirm
 * against the full file before relying on this in isolation.
 *
 * Send request to power management controller and assert
 * DBGPWRUPREQ signal; if power management controller has
 * sane implementation, it should enable CPU power domain
 * in case CPU is in low power state.
 */
edprcr = readl_relaxed(drvdata->base + EDPRCR);
edprcr |= EDPRCR_COREPURQ;
writel_relaxed(edprcr, drvdata->base + EDPRCR);

/* Wait for CPU to be powered up (timeout~=32ms) */
if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
		drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
		DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
	/*
	 * Unfortunately the CPU cannot be powered up, so return
	 * back and later has no permission to access other
	 * registers. For this case, should disable CPU low power
	 * states to ensure CPU power domain is enabled!
	 */
	dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
		__func__, drvdata->cpu);
	return;
}

/*
 * At this point the CPU is powered up, so set the no powerdown
 * request bit so we don't lose power and emulate power down.
 */
edprcr = readl_relaxed(drvdata->base + EDPRCR);
edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
writel_relaxed(edprcr, drvdata->base + EDPRCR);
/*
 * NOTE(review): fragment — the enclosing function header and the 'out'
 * label are outside this chunk; presumably this runs right after EDPCSR
 * has been read into drvdata->edpcsr — confirm against the full file.
 *
 * As described in ARM DDI 0487A.k, if the processing
 * element (PE) is in debug state, or sample-based
 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
 * UNKNOWN state. So directly bail out for this case.
 */
if (drvdata->edpcsr == EDPCSR_PROHIBITED)
	goto out;

/*
 * A read of the EDPCSR normally has the side-effect of
 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
 * at this point it's safe to read value from them.
 */
if (IS_ENABLED(CONFIG_64BIT))
	drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

/* EDCIDSR and EDVIDSR are optional; only read them when implemented */
if (drvdata->edcidsr_present)
	drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

if (drvdata->edvidsr_present)
	drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
/*
 * NOTE(review): fragment — the enclosing function header and the
 * declarations of 'pc', 'arm_inst_offset' and 'thumb_inst_offset'
 * (presumably initialized to 0) are outside this chunk; confirm
 * against the full file.
 */
if (drvdata->pc_has_offset) {
	arm_inst_offset = 8;
	thumb_inst_offset = 4;
}

/* Handle thumb instruction */
if (pc & EDPCSR_THUMB) {
	pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
	return pc;
}

/*
 * Handle arm instruction offset, if the arm instruction
 * is not 4 byte alignment then it's possible the case
 * for implementation defined; keep original value for this
 * case and print info for notice.
 */
if (pc & BIT(1))
	dev_emerg(drvdata->dev,
		  "Instruction offset is implementation defined\n");
else
	pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
/*
 * NOTE(review): fragment — the enclosing function header, the
 * derivation of 'mode' and 'pcsr_offset', and the closing of this
 * switch (break/default/brace) are outside this chunk; confirm
 * against the full file. Cases deliberately cascade: a fuller
 * implementation level implies all the lower-level registers too.
 */
switch (mode) {
case EDDEVID_IMPL_FULL:
	drvdata->edvidsr_present = true;
	fallthrough;
case EDDEVID_IMPL_EDPCSR_EDCIDSR:
	drvdata->edcidsr_present = true;
	fallthrough;
case EDDEVID_IMPL_EDPCSR:
	/*
	 * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
	 * define if has the offset for PC sampling value; if read
	 * back EDDEVID1.PCSROffset == 0x2, then this means the debug
	 * module does not sample the instruction set state when
	 * armv8 CPU in AArch32 state.
	 */
	drvdata->edpcsr_present =
		((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
		 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
/*
 * Power up the debug power domain of every possible CPU via runtime PM.
 *
 * Returns 0 on success. On failure, rolls back the reference counts
 * taken so far and returns the negative error code from
 * pm_runtime_get_sync().
 */
static int debug_enable_func(void)
{
	struct debug_drvdata *drvdata;
	int cpu, ret = 0;
	cpumask_t mask;

	/*
	 * Use cpumask to track which debug power domains have
	 * been powered on and use it to handle failure case.
	 */
	cpumask_clear(&mask);

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		ret = pm_runtime_get_sync(drvdata->dev);
		if (ret < 0)
			goto err;
		else
			cpumask_set_cpu(cpu, &mask);
	}

	return 0;

err:
	/*
	 * If pm_runtime_get_sync() has failed, need rollback on
	 * all the other CPUs that have been enabled before that.
	 */
	for_each_cpu(cpu, &mask) {
		drvdata = per_cpu(debug_drvdata, cpu);
		/* _noidle: drop the usage count without re-suspending */
		pm_runtime_put_noidle(drvdata->dev);
	}

	return ret;
}
/*
 * NOTE(review): fragment — the enclosing function header and the
 * declarations of 'cpu', 'ret' and 'err' are outside this chunk;
 * confirm against the full file.
 *
 * Disable debug power domains, records the error and keep
 * circling through all other CPUs when an error has been
 * encountered.
 */
for_each_possible_cpu(cpu) {
	drvdata = per_cpu(debug_drvdata, cpu);
	if (!drvdata)
		continue;

	/* Keep the last failure code but still visit every CPU */
	ret = pm_runtime_put(drvdata->dev);
	if (ret < 0)
		err = ret;
}
/*
 * NOTE(review): fragment — the enclosing function header, the
 * 'debug_notifier' definition and the 'err' label are outside this
 * chunk; confirm against the full file.
 */
/* Register function to be called for panic */
ret = atomic_notifier_chain_register(&panic_notifier_list,
				     &debug_notifier);
if (ret) {
	pr_err("%s: unable to register notifier: %d\n",
	       __func__, ret);
	goto err;
}
The information on this web page has been compiled carefully and to the
best of our knowledge. However, no guarantee is given as to the
completeness, correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.