/*
 * VMAP'd stacks checking for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
/* Per-CPU stacks the SDEI entry code switches to (normal and critical events). */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
staticint _init_sdei_scs(unsignedlong * __percpu *ptr, int cpu)
{ void *s;
s = scs_alloc(cpu_to_node(cpu)); if (!s) return -ENOMEM;
per_cpu(*ptr, cpu) = s;
return 0;
}
/*
 * Allocate the normal and critical shadow call stacks for every possible
 * CPU. A no-op (returning 0) when shadow call stacks are disabled. On any
 * allocation failure, everything allocated so far is torn down again via
 * free_sdei_scs().
 *
 * Returns 0 on success or a negative errno.
 */
static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!scs_is_enabled())
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}
/*
 * NOTE(review): only the head of this function survived extraction - the
 * stack/SCS initialisation, the entry-point selection and the out_err label
 * targeted by the goto below are missing. Restore the tail from the original
 * source before building; only the visible fragment is repaired here.
 */
unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_nvhe()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}
/*
 * do_sdei_event() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
				      struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	/*
	 * FIX(review): 'mode' was read below without ever being assigned
	 * (undefined behaviour). Derive it from the interrupted context's
	 * saved pstate, which is what the kernel_mode/PSR_MODE32_BIT
	 * comparisons expect.
	 */
	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}
/*
 * Extraction residue (website boilerplate appended by the tool that produced
 * this file; not part of the source code):
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.20 Sekunden (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */