// SPDX-License-Identifier: GPL-2.0-only /* * linux/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson * * 2003-06-02 Jim Houston - Concurrent Computer Corp. * Changes to use preallocated sigqueue structures * to allow signals to be sent reliably.
*/
/* Only allow kernel generated signals to this kthread */ if (unlikely((t->flags & PF_KTHREAD) &&
(handler == SIG_KTHREAD_KERNEL) && !force)) returntrue;
return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/* * Re-calculate pending state from the set of locally pending * signals, globally pending signals, and blocked signals.
*/ staticinlinebool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{ unsignedlong ready; long i;
/* * We must never clear the flag in another thread, or in current * when it's possible the current syscall is returning -ERESTART*. * So we don't clear it here, and only callers who know they should do.
*/ returnfalse;
}
void recalc_sigpending(void)
{ if (!recalc_sigpending_tsk(current) && !freezing(current)) { if (unlikely(test_thread_flag(TIF_SIGPENDING)))
clear_thread_flag(TIF_SIGPENDING);
}
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do: the first word was handled above. */
		break;
	}

	return sig;
}
/** * task_set_jobctl_pending - set jobctl pending bits * @task: target task * @mask: pending bits to set * * Clear @mask from @task->jobctl. @mask must be subset of * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is * cleared. If @task is already being killed or exiting, this function * becomes noop. * * CONTEXT: * Must be called with @task->sighand->siglock held. * * RETURNS: * %true if @mask is set, %false if made noop because @task was dying.
*/ bool task_set_jobctl_pending(struct task_struct *task, unsignedlong mask)
{
BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) returnfalse;
if (mask & JOBCTL_STOP_SIGMASK)
task->jobctl &= ~JOBCTL_STOP_SIGMASK;
task->jobctl |= mask; returntrue;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (likely(!(task->jobctl & JOBCTL_TRAPPING)))
		return;

	task->jobctl &= ~JOBCTL_TRAPPING;
	smp_mb();	/* advised by wake_up_bit() */
	wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
}
/** * task_clear_jobctl_pending - clear jobctl pending bits * @task: target task * @mask: pending bits to clear * * Clear @mask from @task->jobctl. @mask must be subset of * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other * STOP bits are cleared together. * * If clearing of @mask leaves no stop or trap pending, this function calls * task_clear_jobctl_trapping(). * * CONTEXT: * Must be called with @task->sighand->siglock held.
*/ void task_clear_jobctl_pending(struct task_struct *task, unsignedlong mask)
{
BUG_ON(mask & ~JOBCTL_PENDING_MASK);
if (mask & JOBCTL_STOP_PENDING)
mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
task->jobctl &= ~mask;
if (!(task->jobctl & JOBCTL_PENDING_MASK))
task_clear_jobctl_trapping(task);
}
/** * task_participate_group_stop - participate in a group stop * @task: task participating in a group stop * * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. * Group stop states are cleared and the group stop count is consumed if * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group * stop, the appropriate `SIGNAL_*` flags are set. * * CONTEXT: * Must be called with @task->sighand->siglock held. * * RETURNS: * %true if group stop completion should be notified to the parent, %false * otherwise.
*/ staticbool task_participate_group_stop(struct task_struct *task)
{ struct signal_struct *sig = task->signal; bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
if (!WARN_ON_ONCE(sig->group_stop_count == 0))
sig->group_stop_count--;
/* * Tell the caller to notify completion iff we are entering into a * fresh group stop. Read comment in do_signal_stop() for details.
*/ if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); returntrue;
} returnfalse;
}
/* Have the new thread join an on-going signal group stop */
task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
staticstruct ucounts *sig_get_ucounts(struct task_struct *t, int sig, int override_rlimit)
{ struct ucounts *ucounts; long sigpending;
/* * Protect access to @t credentials. This can go away when all * callers hold rcu read lock. * * NOTE! A pending signal will hold on to the user refcount, * and we get/put the refcount only when the sigpending count * changes from/to zero.
*/
rcu_read_lock();
ucounts = task_ucounts(t);
sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
override_rlimit);
rcu_read_unlock(); if (!sigpending) return NULL;
/* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an * appropriate lock must be held to stop the target task from exiting
*/ staticstruct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, int override_rlimit)
{ struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit); struct sigqueue *q;
/* * Collect the siginfo appropriate to this signal. Check if * there is another siginfo for the same signal.
*/
list_for_each_entry(q, &list->list, list) { if (q->info.si_signo == sig) { if (first) goto still_pending;
first = q;
}
}
sigdelset(&list->signal, sig);
if (first) {
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
/* * posix-timer signals are preallocated and freed when the last * reference count is dropped in posixtimer_deliver_signal() or * immediately on timer deletion when the signal is not pending. * Spare the extra round through __sigqueue_free() which is * ignoring preallocated signals.
*/ if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
*timer_sigq = first; else
__sigqueue_free(first);
} else { /* * Ok, it wasn't in the queue. This must be * a fast-pathed signal or we must have been * out of queue space. So zero out the info.
*/
clear_siginfo(info);
info->si_signo = sig;
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = 0;
info->si_uid = 0;
}
}
/*
 * Dequeue the lowest-numbered deliverable signal from @pending that is
 * not blocked by @mask, filling in @info (and @timer_sigq for posix-timer
 * signals).  Returns the signal number, or 0 if none is pending.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, timer_sigq);
	return sig;
}
/* * Try to dequeue a signal. If a deliverable signal is found fill in the * caller provided siginfo and return the signal number. Otherwise return * 0.
*/ int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{ struct task_struct *tsk = current; struct sigqueue *timer_sigq; int signr;
if (unlikely(signr == SIGALRM))
posixtimer_rearm_itimer(tsk);
}
recalc_sigpending(); if (!signr) return 0;
if (unlikely(sig_kernel_stop(signr))) { /* * Set a marker that we have dequeued a stop signal. Our * caller might release the siglock and then the pending * stop signal it is about to process is no longer in the * pending bitmasks, but must still be cleared by a SIGCONT * (and overruled by a SIGKILL). So those cases clear this * shared flag after we've set it. Note that this flag may * remain set after the signal we return is ignored or * handled. That doesn't matter because its only purpose * is to alert stop-signal processing code when another * processor has come along and cleared the flag.
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) { if (!posixtimer_deliver_signal(info, timer_sigq)) goto again;
}
/* * Might a synchronous signal be in the queue?
*/ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) return 0;
/* * Return the first synchronous signal in the queue.
*/
list_for_each_entry(q, &pending->list, list) { /* Synchronous signals have a positive si_code */ if ((q->info.si_code > SI_USER) &&
(sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
sync = q; goto next;
}
} return 0;
next: /* * Check if there is another siginfo for the same signal.
*/
list_for_each_entry_continue(q, &pending->list, list) { if (q->info.si_signo == sync->info.si_signo) goto still_pending;
}
/* * Tell a process that it has a new active signal.. * * NOTE! we rely on the previous spin_lock to * lock interrupts for us! We can only be called with * "siglock" held, and the local interrupt must * have been disabled when that got acquired! * * No need to set need_resched since signal event passing * goes through ->blocked
*/ void signal_wake_up_state(struct task_struct *t, unsignedint state)
{
lockdep_assert_held(&t->sighand->siglock);
set_tsk_thread_flag(t, TIF_SIGPENDING);
/* * TASK_WAKEKILL also means wake it up in the stopped/traced/killable * case. We don't check t->state here because there is a race with it * executing another processor and just now entering stopped state. * By using wake_up_state, we ensure the process will wake up and * handle its death signal.
*/ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
kick_process(t);
}
/* Remove signals in mask from the pending set and queue. */
static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask,
				struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	lockdep_assert_held(&p->sighand->siglock);

	/* Fast path: nothing in @mask is actually pending. */
	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			sigqueue_free_ignored(p, q);
		}
	}
}
staticinlineint is_si_special(conststruct kernel_siginfo *info)
{ return info <= SEND_SIG_PRIV;
}
/* * Bad permissions for sending the signal * - the caller must hold the RCU read lock
*/ staticint check_kill_permission(int sig, struct kernel_siginfo *info, struct task_struct *t)
{ struct pid *sid; int error;
if (!valid_signal(sig)) return -EINVAL;
if (!si_fromuser(info)) return 0;
error = audit_signal_info(sig, t); /* Let audit system see the signal */ if (error) return error;
if (!same_thread_group(current, t) &&
!kill_ok_by_cred(t)) { switch (sig) { case SIGCONT:
sid = task_session(t); /* * We don't return the error if sid == NULL. The * task was unhashed, the caller must notice this.
*/ if (!sid || sid == task_session(current)) break;
fallthrough; default: return -EPERM;
}
}
return security_task_kill(t, info, sig, NULL);
}
/** * ptrace_trap_notify - schedule trap to notify ptracer * @t: tracee wanting to notify tracer * * This function schedules sticky ptrace trap which is cleared on the next * TRAP_STOP to notify ptracer of an event. @t must have been seized by * ptracer. * * If @t is running, STOP trap will be taken. If trapped for STOP and * ptracer is listening for events, tracee is woken up so that it can * re-trap for the new event. If trapped otherwise, STOP trap will be * eventually taken without returning to userland after the existing traps * are finished by PTRACE_CONT. * * CONTEXT: * Must be called with @task->sighand->siglock held.
*/ staticvoid ptrace_trap_notify(struct task_struct *t)
{
WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
lockdep_assert_held(&t->sighand->siglock);
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(p, &flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(p, &flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/* * Test if P wants to take SIG. After we've checked all threads with this, * it's equivalent to finding no threads not blocking SIG. Any threads not * blocking SIG were ruled out because they are not running and already * have pending signals. Such threads will dequeue from the shared queue * as soon as they're available, so putting the signal on the shared queue * will be equivalent to sending it to one such thread.
*/ staticinlinebool wants_signal(int sig, struct task_struct *p)
{ if (sigismember(&p->blocked, sig)) returnfalse;
/* * Now find a thread we can wake up to take the signal off the queue. * * Try the suggested task first (may or may not be the main thread).
*/ if (wants_signal(sig, p))
t = p; elseif ((type == PIDTYPE_PID) || thread_group_empty(p)) /* * There is just one thread and it does not need to be woken. * It will dequeue unblocked signals before it runs again.
*/ return; else { /* * Otherwise try to find a suitable thread.
*/
t = signal->curr_target; while (!wants_signal(sig, t)) {
t = next_thread(t); if (t == signal->curr_target) /* * No thread needs to be woken. * Any eligible threads will see * the signal in the queue soon.
*/ return;
}
signal->curr_target = t;
}
/* * Found a killable thread. If the signal will be fatal, * then start taking the whole group down immediately.
*/ if (sig_fatal(p, sig) &&
(signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL || !p->ptrace)) { /* * This signal will be fatal to the whole group.
*/ if (!sig_kernel_coredump(sig)) { /* * Start a group exit and wake everybody up. * This way we don't have other threads * running and doing things after a slower * thread has the fatal signal pending.
*/
signal->flags = SIGNAL_GROUP_EXIT;
signal->group_exit_code = sig;
signal->group_stop_count = 0;
__for_each_thread(signal, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
} return;
}
}
/* * The signal is already in the shared-pending queue. * Tell the chosen thread to wake up and dequeue it.
*/
signal_wake_up(t, sig == SIGKILL); return;
}
staticint __send_signal_locked(int sig, struct kernel_siginfo *info, struct task_struct *t, enum pid_type type, bool force)
{ struct sigpending *pending; struct sigqueue *q; int override_rlimit; int ret = 0, result;
lockdep_assert_held(&t->sighand->siglock);
result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, force)) goto ret;
pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; /* * Short-circuit ignored signals and support queuing * exactly one non-rt signal, so that we can get more * detailed information about the cause of the signal.
*/
result = TRACE_SIGNAL_ALREADY_PENDING; if (legacy_queue(pending, sig)) goto ret;
result = TRACE_SIGNAL_DELIVERED; /* * Skip useless siginfo allocation for SIGKILL and kernel threads.
*/ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) goto out_set;
/* * Real-time signals must be queued if sent by sigqueue, or * some other real-time mechanism. It is implementation * defined whether kill() does so. We attempt to do so, on * the principle of least surprise, but since kill is not * allowed to fail with EAGAIN when low on memory we just * make sure at least one signal gets delivered and don't * pass on the info struct.
*/ if (sig < SIGRTMIN)
override_rlimit = (is_si_special(info) || info->si_code >= 0); else
override_rlimit = 0;
if (q) {
list_add_tail(&q->list, &pending->list); switch ((unsignedlong) info) { case (unsignedlong) SEND_SIG_NOINFO:
clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
q->info.si_pid = task_tgid_nr_ns(current,
task_active_pid_ns(t));
rcu_read_lock();
q->info.si_uid =
from_kuid_munged(task_cred_xxx(t, user_ns),
current_uid());
rcu_read_unlock(); break; case (unsignedlong) SEND_SIG_PRIV:
clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_KERNEL;
q->info.si_pid = 0;
q->info.si_uid = 0; break; default:
copy_siginfo(&q->info, info); break;
}
} elseif (!is_si_special(info) &&
sig >= SIGRTMIN && info->si_code != SI_USER) { /* * Queue overflow, abort. We may abort if the * signal was rt and sent by user using something * other than kill().
*/
result = TRACE_SIGNAL_OVERFLOW_FAIL;
ret = -EAGAIN; goto ret;
} else { /* * This is a silent loss of information. We still * send the signal, but the *info bits are lost.
*/
result = TRACE_SIGNAL_LOSE_INFO;
}
/*
 * Does the siginfo layout for this signal/code carry si_pid and si_uid
 * fields that need user-namespace translation?
 */
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
int send_signal_locked(int sig, struct kernel_siginfo *info, struct task_struct *t, enum pid_type type)
{ /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */ bool force = false;
if (info == SEND_SIG_NOINFO) { /* Force if sent from an ancestor pid namespace */
force = !task_pid_nr_ns(current, task_active_pid_ns(t));
} elseif (info == SEND_SIG_PRIV) { /* Don't ignore kernel generated signals */
force = true;
} elseif (has_si_pid_and_uid(info)) { /* SIGKILL and SIGSTOP is special or has ids */ struct user_namespace *t_user_ns;
/*
 * Send @sig/@info to @p with the sighand lock held.  Returns the result
 * of send_signal_locked(), or -ESRCH when the sighand can no longer be
 * locked (the task is going away).
 */
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/* Selects which handler semantics force_sig_info_to_task() applies. */
enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/* Force @info onto current with its currently-installed handler semantics. */
int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}
/* * Nuke all other threads in the group.
*/ int zap_other_threads(struct task_struct *p)
{ struct task_struct *t; int count = 0;
rcu_read_lock(); for (;;) {
sighand = rcu_dereference(tsk->sighand); if (unlikely(sighand == NULL)) break;
/* * This sighand can be already freed and even reused, but * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which * initializes ->siglock: this slab can't go away, it has * the same object type, ->siglock can't be reinitialized. * * We need to ensure that tsk->sighand is still the same * after we take the lock, we can race with de_thread() or * __exit_signal(). In the latter case the next iteration * must see ->sighand == NULL.
*/
spin_lock_irqsave(&sighand->siglock, *flags); if (likely(sighand == rcu_access_pointer(tsk->sighand))) break;
spin_unlock_irqrestore(&sighand->siglock, *flags);
}
rcu_read_unlock();
/* * send signal info to all the members of a thread group or to the * individual thread if type == PIDTYPE_PID.
*/ int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type)
{ int ret;
rcu_read_lock();
ret = check_kill_permission(sig, info, p);
rcu_read_unlock();
if (!ret && sig)
ret = do_send_sig_info(sig, info, p, type);
return ret;
}
/* * __kill_pgrp_info() sends a signal to a process group: this is what the tty * control characters do (^C, ^Z etc) * - the caller must hold at least a readlock on tasklist_lock
*/ int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{ struct task_struct *p = NULL; int ret = -ESRCH;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) { int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); /* * If group_send_sig_info() succeeds at least once ret * becomes 0 and after that the code below has no effect. * Otherwise we return the last err or -ESRCH if this * process group is empty.
*/ if (ret)
ret = err;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
for (;;) {
rcu_read_lock();
p = pid_task(pid, PIDTYPE_PID); if (p)
error = group_send_sig_info(sig, info, p, type);
rcu_read_unlock(); if (likely(!p || error != -ESRCH)) return error; /* * The task was unhashed in between, try again. If it * is dead, pid_task() will return NULL, if we race with * de_thread() it will find the new leader.
*/
}
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user 	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will encoded in the low
 * 32bits of the pointer.  Those low 32bits will be stored at higher
 * address than appear in a 32 bit pointer.  So userspace will not
 * see the address it was expecting for it's completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situration takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	/* Fill in the siginfo before it is handed to the security hook
	 * and queued; it must never be sent uninitialized. */
	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/* * kill_something_info() interprets pid in interesting ways just like kill(2). * * POSIX specifies that kill(-1,sig) is unspecified, but what we have * is probably wrong. Should make it like BSD or SYSV.
*/
if (pid > 0) return kill_proc_info(sig, info, pid);
/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */ if (pid == INT_MIN) return -ESRCH;
read_lock(&tasklist_lock); if (pid != -1) {
ret = __kill_pgrp_info(sig, info,
pid ? find_vpid(-pid) : task_pgrp(current));
} else { int retval = 0, count = 0; struct task_struct * p;
for_each_process(p) { if (task_pid_vnr(p) > 1 &&
!same_thread_group(p, current)) { int err = group_send_sig_info(sig, info, p,
PIDTYPE_MAX);
++count; if (err != -EPERM)
retval = err;
}
}
ret = count ? retval : -ESRCH;
}
read_unlock(&tasklist_lock);
return ret;
}
/* * These are for backward compatibility with the rest of the kernel source.
*/
int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{ /* * Make sure legacy kernel users don't send in bad values * (normal paths check this in check_kill_permission).
*/ if (!valid_signal(sig)) return -EINVAL;
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	/* A failed SIGSEGV handler must not be re-entered with a
	 * catchable SIGSEGV; make the signal unconditionally fatal. */
	if (sig != SIGSEGV) {
		force_sig(SIGSEGV);
		return;
	}
	force_fatal_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr, struct task_struct *t)
{ struct kernel_siginfo info;
/* * Signals generated by perf events should not terminate the whole * process if SIGTRAP is blocked, however, delivering the signal * asynchronously is better than not delivering at all. But tell user * space if the signal was asynchronous, so it can clearly be * distinguished from normal synchronous ones.
*/
info.si_perf_flags = sigismember(¤t->blocked, info.si_signo) ?
TRAP_PERF_FLAG_ASYNC :
0;
/** * force_sig_seccomp - signals the task to allow in-process syscall emulation * @syscall: syscall number to send to userland * @reason: filter-supplied reason code to send to userland (via si_errno) * @force_coredump: true to trigger a coredump * * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
*/ int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{ struct kernel_siginfo info;
/* For the crazy architectures that include trap information in * the errno field, instead of an actual errno value.
*/ int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{ struct kernel_siginfo info;
/* For the rare architectures that include trap information using * si_trapno.
*/ int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{ struct kernel_siginfo info;
/* For the rare architectures that include trap information using * si_trapno.
*/ int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, struct task_struct *t)
{ struct kernel_siginfo info;
/* * This function is used by POSIX timers to deliver a timer signal. * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID * set), the signal must be delivered to the specific thread (queues * into t->pending). * * Where type is not PIDTYPE_PID, signals must be delivered to the * process. In this case, prefer to deliver to current if it is in * the same thread group as the target process and its sighand is * stable, which avoids unnecessarily waking up a potentially idle task.
*/ staticinlinestruct task_struct *posixtimer_get_target(struct k_itimer *tmr)
{ struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
if (t && tmr->it_pid_type != PIDTYPE_PID &&
same_thread_group(t, current) && !current->exit_state)
t = current; return t;
}
void posixtimer_send_sigqueue(struct k_itimer *tmr)
{ struct sigqueue *q = &tmr->sigq; int sig = q->info.si_signo; struct task_struct *t; unsignedlong flags; int result;
guard(rcu)();
t = posixtimer_get_target(tmr); if (!t) return;
if (!likely(lock_task_sighand(t, &flags))) return;
/* * Update @tmr::sigqueue_seq for posix timer signals with sighand * locked to prevent a race against dequeue_signal().
*/
tmr->it_sigqueue_seq = tmr->it_signal_seq;
/* * Set the signal delivery status under sighand lock, so that the * ignored signal handling can distinguish between a periodic and a * non-periodic timer.
*/
tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
if (!prepare_signal(sig, t, false)) {
result = TRACE_SIGNAL_IGNORED;
if (!list_empty(&q->list)) { /* * The signal was ignored and blocked. The timer * expiry queued it because blocked signals are * queued independent of the ignored state. * * The unblocking set SIGPENDING, but the signal * was not yet dequeued from the pending list. * So prepare_signal() sees unblocked and ignored, * which ends up here. Leave it queued like a * regular signal. * * The same happens when the task group is exiting * and the signal is already queued. * prepare_signal() treats SIGNAL_GROUP_EXIT as * ignored independent of its queued state. This * gets cleaned up in __exit_signal().
*/ goto out;
}
/* Periodic timers with SIG_IGN are queued on the ignored list */ if (tmr->it_sig_periodic) { /* * Already queued means the timer was rearmed after * the previous expiry got it on the ignore list. * Nothing to do for that case.
*/ if (hlist_unhashed(&tmr->ignored_list)) { /* * Take a signal reference and queue it on * the ignored list.
*/
posixtimer_sigqueue_getref(q);
posixtimer_sig_ignore(t, q);
}
} elseif (!hlist_unhashed(&tmr->ignored_list)) { /* * Covers the case where a timer was periodic and * then the signal was ignored. Later it was rearmed * as oneshot timer. The previous signal is invalid * now, and this oneshot signal has to be dropped. * Remove it from the ignored list and drop the * reference count as the signal is not longer * queued.
*/
hlist_del_init(&tmr->ignored_list);
posixtimer_putref(tmr);
} goto out;
}
if (unlikely(!list_empty(&q->list))) { /* This holds a reference count already */
result = TRACE_SIGNAL_ALREADY_PENDING; goto out;
}
/* * If the signal is on the ignore list, it got blocked after it was * ignored earlier. But nothing lifted the ignore. Move it back to * the pending list to be consistent with the regular signal * handling. This already holds a reference count. * * If it's not on the ignore list acquire a reference count.
*/ if (likely(hlist_unhashed(&tmr->ignored_list)))
posixtimer_sigqueue_getref(q); else
hlist_del_init(&tmr->ignored_list);
/* * If the timer is marked deleted already or the signal originates * from a non-periodic timer, then just drop the reference * count. Otherwise queue it on the ignored list.
*/ if (posixtimer_valid(tmr) && tmr->it_sig_periodic)
hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers); else
posixtimer_putref(tmr);
}
/* * Rearming a timer with sighand lock held is not possible due to * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and * let the signal delivery path deal with it whether it needs to be * rearmed or not. This cannot be decided here w/o dropping sighand * lock and creating a loop retry horror show.
*/
hlist_for_each_entry_safe(tmr, tmp , head, ignored_list) { struct task_struct *target;
/* * tmr::sigq.info.si_signo is immutable, so accessing it * without holding tmr::it_lock is safe.
*/ if (tmr->sigq.info.si_signo != sig) continue;
hlist_del_init(&tmr->ignored_list);
/* This should never happen and leaks a reference count */ if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list))) continue;
/* * Get the target for the signal. If target is a thread and * has exited by now, drop the reference count.
*/
guard(rcu)();
target = posixtimer_get_target(tmr); if (target)
posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type); else
posixtimer_putref(tmr);
}
} #else/* CONFIG_POSIX_TIMERS */ staticinlinevoid posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { } staticinlinevoid posixtimer_sig_unignore(struct task_struct *tsk, int sig) { } #endif/* !CONFIG_POSIX_TIMERS */
/* * Let a parent know about the death of a child. * For a stopped/continued status change, use do_notify_parent_cldstop instead. * * Returns true if our parent ignored us and so we've switched to * self-reaping.
*/ bool do_notify_parent(struct task_struct *tsk, int sig)
{ struct kernel_siginfo info; unsignedlong flags; struct sighand_struct *psig; bool autoreap = false;
u64 utime, stime;
WARN_ON_ONCE(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
/* ptraced, or group-leader without sub-threads */
do_notify_pidfd(tsk);
if (sig != SIGCHLD) { /* * This is only possible if parent == real_parent. * Check if it has changed security domain.
*/ if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
sig = SIGCHLD;
}
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0; /* * We are under tasklist_lock here so our parent is tied to * us and cannot change. * * task_active_pid_ns will always return the same pid namespace * until a task passes through release_task. * * write_lock() currently calls preempt_disable() which is the * same as rcu_read_lock(), but according to Oleg, this is not * correct to rely on this
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
task_uid(tsk));
rcu_read_unlock();
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags); if (!tsk->ptrace && sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* * We are exiting and our parent doesn't care. POSIX.1 * defines special semantics for setting SIGCHLD to SIG_IGN * or setting the SA_NOCLDWAIT flag: we should be reaped * automatically and not left for our parent's wait4 call. * Rather than having the parent do it as a magic kind of * signal handler, we just set this to tell do_exit that we * can be cleaned up without becoming a zombie. Note that * we still call __wake_up_parent in this case, because a * blocked sys_wait4 might now return -ECHILD. * * Whether we send SIGCHLD or not for SA_NOCLDWAIT * is implementation-defined: we do (if you don't want * it, just use SIG_IGN instead).
*/
autoreap = true; if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
sig = 0;
} /* * Send with __send_signal as si_pid and si_uid are in the * parent's namespaces.
*/ if (valid_signal(sig) && sig)
__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * NOTE(review): the scraped original read @parent without ever assigning it
 * and sent @info without populating it; the parent selection and siginfo
 * setup were reconstructed — confirm against upstream.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/* si_pid/si_uid must be translated into the parent's namespaces. */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/* * This must be called with current->sighand->siglock held. * * This should be the path for all ptrace stops. * We always set current->last_siginfo while stopped here. * That makes it a way to test a stopped process for * being ptrace-stopped vs being job-control-stopped. * * Returns the signal the ptracer requested the code resume * with. If the code did not stop because the tracer is gone, * the stop signal remains unchanged unless clear_code.
*/ staticint ptrace_stop(int exit_code, int why, unsignedlong message,
kernel_siginfo_t *info)
__releases(¤t->sighand->siglock)
__acquires(¤t->sighand->siglock)
{ bool gstop_done = false;
if (arch_ptrace_stop_needed()) { /* * The arch code has something special to do before a * ptrace stop. This is allowed to block, e.g. for faults * on user stack pages. We can't keep the siglock while * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count.
*/
spin_unlock_irq(¤t->sighand->siglock);
arch_ptrace_stop();
spin_lock_irq(¤t->sighand->siglock);
}
/* * After this point ptrace_signal_wake_up or signal_wake_up * will clear TASK_TRACED if ptrace_unlink happens or a fatal * signal comes in. Handle previous ptrace_unlinks and fatal * signals here to prevent ptrace_stop sleeping in schedule.
*/ if (!current->ptrace || __fatal_signal_pending(current)) return exit_code;
/* * We're committing to trapping. TRACED should be visible before * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). * Also, transition to TRACED and updates to ->jobctl should be * atomic with respect to siglock and should be done after the arch * hook as siglock is released and regrabbed across it. * * TRACER TRACEE * * ptrace_attach() * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) * do_wait() * set_current_state() smp_wmb(); * ptrace_do_wait() * wait_task_stopped() * task_stopped_code() * [L] task_is_traced() [S] task_clear_jobctl_trapping();
*/
smp_wmb();
/* * If @why is CLD_STOPPED, we're trapping to participate in a group * stop. Do the bookkeeping. Note that if SIGCONT was delievered * across siglock relocks since INTERRUPT was scheduled, PENDING * could be clear now. We act as if SIGCONT is received after * TASK_TRACED is entered - ignore it.
*/ if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
gstop_done = task_participate_group_stop(current);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.