// SPDX-License-Identifier: GPL-2.0-only /* * Based on arch/arm/kernel/signal.c * * Copyright (C) 1995-2009 Russell King * Copyright (C) 2012 ARM Ltd.
*/
/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;	/* Permission Overlay register for EL0 (POE) */
};
/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/*
		 * No ISB required as we can tolerate spurious Overlay faults -
		 * the fault handler will check again based on the new value
		 * of POR_EL0.
		 */
	}
}
/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}
/* * Restore the user access state to the values saved in ua_state. * * No uaccess should be done after that function is called.
*/ staticvoid restore_user_access_state(conststruct user_access_state *ua_state)
{ if (system_supports_poe())
write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}
/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K
if (padded_size > user->limit - user->size &&
!user->extra_offset &&
extend) { int ret;
user->limit += EXTRA_CONTEXT_SIZE;
ret = __sigframe_alloc(user, &user->extra_offset, sizeof(struct extra_context), false); if (ret) {
user->limit -= EXTRA_CONTEXT_SIZE; return ret;
}
/* Reserve space for the __reserved[] terminator */
user->size += TERMINATOR_SIZE;
/* * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for * the terminator:
*/
user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
}
/* Still not enough space? Bad luck! */ if (padded_size > user->limit - user->size) return -ENOMEM;
*offset = user->size;
user->size += padded_size;
return 0;
}
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	/* Extension of the frame via extra_context is permitted here */
	return __sigframe_alloc(user, offset, size, true);
}
/* Allocate the null terminator record and prevent further allocations */ staticint sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{ int ret;
/* Un-reserve the space reserved for the terminator: */
user->limit += TERMINATOR_SIZE;
ret = sigframe_alloc(user, &user->end_offset, sizeof(struct _aarch64_ctx)); if (ret) return ret;
if (user->sve_size < sizeof(*user->sve)) return -EINVAL;
__get_user_error(user_vl, &(user->sve->vl), err);
__get_user_error(flags, &(user->sve->flags), err); if (err) return err;
sm = flags & SVE_SIG_FLAG_SM; if (sm) { if (!system_supports_sme()) return -EINVAL;
vl = task_get_sme_vl(current);
} else { /* * A SME only system use SVE for streaming mode so can * have a SVE formatted context with a zero VL and no * payload data.
*/ if (!system_supports_sve() && !system_supports_sme()) return -EINVAL;
vl = task_get_sve_vl(current);
}
if (user_vl != vl) return -EINVAL;
/* * Non-streaming SVE state may be preserved without an SVE payload, in * which case the SVE context only has a header with VL==0, and all * state can be restored from the FPSIMD context. * * Streaming SVE state is always preserved with an SVE payload. For * consistency and robustness, reject restoring streaming SVE state * without an SVE payload.
*/ if (!sm && user->sve_size == sizeof(*user->sve)) return restore_fpsimd_context(user);
vq = sve_vq_from_vl(vl);
if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq)) return -EINVAL;
sve_alloc(current, true); if (!current->thread.sve_state) {
clear_thread_flag(TIF_SVE); return -ENOMEM;
}
/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);
/* * If GCS is enabled we will add a cap token to the frame, * include it in the GCSPR_EL0 we report to support stack * switching via sigreturn if GCS is enabled. We do not allow * enabling via sigreturn so the token is only relevant for * threads with GCS enabled.
*/ if (task_gcs_el0_enabled(current))
gcspr -= 8;
/* If we are disabling disable everything */ if (!(enabled & PR_SHADOW_STACK_ENABLE))
enabled = 0;
current->thread.gcs_el0_mode = enabled;
/* * We let userspace set GCSPR_EL0 to anything here, we will * validate later in gcs_restore_signal().
*/
write_sysreg_s(gcspr, SYS_GCSPR_EL0);
return 0;
}
#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);
#ifdef CONFIG_ARM64_GCS staticint gcs_restore_signal(void)
{
u64 gcspr_el0, cap; int ret;
if (!system_supports_gcs()) return 0;
if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)) return 0;
gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
/* * Ensure that any changes to the GCS done via GCS operations * are visible to the normal reads we do to validate the * token.
*/
gcsb_dsync();
/* * GCSPR_EL0 should be pointing at a capped GCS, read the cap. * We don't enforce that this is in a GCS page, if it is not * then faults will be generated on GCS operations - the main * concern is to protect GCS pages.
*/
ret = copy_from_user(&cap, (unsignedlong __user *)gcspr_el0, sizeof(cap)); if (ret) return -EFAULT;
/* * Check that the cap is the actual GCS before replacing it.
*/ if (cap != GCS_SIGNAL_CAP(gcspr_el0)) return -EINVAL;
/* Invalidate the token to prevent reuse */
put_user_gcs(0, (unsignedlong __user *)gcspr_el0, &ret); if (ret != 0) return -EFAULT;
/* * Determine the layout of optional records in the signal frame * * add_all: if true, lays out the biggest possible signal frame for * this task; otherwise, generates a layout for the current state * of the task.
*/ staticint setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all)
{ int err;
if (system_supports_fpsimd()) {
err = sigframe_alloc(user, &user->fpsimd_offset, sizeof(struct fpsimd_context)); if (err) return err;
}
/* fault information, if valid */ if (add_all || current->thread.fault_code) {
err = sigframe_alloc(user, &user->esr_offset, sizeof(struct esr_context)); if (err) return err;
}
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &user->next_frame->fp, err);
__put_user_error(regs->regs[30], &user->next_frame->lr, err);
for (i = 0; i < 31; i++)
__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
err);
__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
extra = (struct extra_context __user *)userp;
userp += EXTRA_CONTEXT_SIZE;
end = (struct _aarch64_ctx __user *)userp;
userp += TERMINATOR_SIZE;
/* * extra_datap is just written to the signal frame. * The value gets cast back to a void __user * * during sigreturn.
*/
extra_datap = (__force u64)userp;
extra_size = sfp + round_up(user->size, 16) - userp;
/* * Check that we can actually write to the signal frame.
*/ if (!access_ok(user->sigframe, sp_top - sp)) return -EFAULT;
return 0;
}
#ifdef CONFIG_ARM64_GCS
staticint gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
u64 gcspr_el0; int ret = 0;
if (!system_supports_gcs()) return 0;
if (!task_gcs_el0_enabled(current)) return 0;
/* * We are entering a signal handler, current register state is * active.
*/
gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
/* * Push a cap and the GCS entry for the trampoline onto the GCS.
*/
put_user_gcs((unsignedlong)sigtramp,
(unsignedlong __user *)(gcspr_el0 - 16), &ret);
put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
(unsignedlong __user *)(gcspr_el0 - 8), &ret); if (ret != 0) return ret;
err = gcs_signal_entry(sigtramp, ksig); if (err) return err;
/* * We must not fail from this point onwards. We are going to update * registers, including SP, in order to invoke the signal handler. If * we failed and attempted to deliver a nested SIGSEGV to a handler * after that point, the subsequent sigreturn would end up restoring * the (partial) state for the original signal handler.
*/
/* * Signal delivery is a (wacky) indirect function call in * userspace, so simulate the same setting of BTYPE as a BLR * <register containing the signal handler entry point>. * Signal delivery to a location in a PROT_BTI guarded page * that is not a function entry point will now trigger a * SIGILL in userspace. * * If the signal handler entry point is not in a PROT_BTI * guarded page, this is harmless.
*/ if (system_supports_bti()) {
regs->pstate &= ~PSR_BTYPE_MASK;
regs->pstate |= PSR_BTYPE_C;
}
/* TCO (Tag Check Override) always cleared for signal handlers */
regs->pstate &= ~PSR_TCO_BIT;
/* Signal handlers are invoked with ZA and streaming mode disabled */ if (system_supports_sme()) {
task_smstop_sm(current);
current->thread.svcr &= ~SVCR_ZA_MASK;
write_sysreg_s(0, SYS_TPIDR2_EL0);
}
/* * OK, we're invoking a handler
*/ staticvoid handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret;
rseq_signal_deliver(ksig, regs);
/* * Set up the stack frame
*/ if (is_compat_task()) { if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = compat_setup_rt_frame(usig, ksig, oldset, regs); else
ret = compat_setup_frame(usig, ksig, oldset, regs);
} else {
ret = setup_rt_frame(usig, ksig, oldset, regs);
}
/* * Check that the resulting registers are actually sane.
*/
ret |= !valid_user_regs(®s->user_regs, current);
/* Step into the signal handler if we are stepping */
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		/* Syscall instruction is 2 bytes in Thumb, 4 otherwise */
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}
/* Populated once at boot by minsigstksz_setup(); read-only afterwards. */
unsigned long __ro_after_init signal_minsigstksz;
/* * Determine the stack space required for guaranteed signal delivery. * This function is used to populate AT_MINSIGSTKSZ at process startup. * cpufeatures setup is assumed to be complete.
*/ void __init minsigstksz_setup(void)
{ struct rt_sigframe_user_layout user;
init_user_layout(&user);
/* * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't * be big enough, but it's our best guess:
*/ if (WARN_ON(setup_sigframe_layout(&user, true))) return;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.