// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
/*
 * - No shared variables, all the data are CPU local.
 * - If a softirq needs serialization, let it serialize itself
 *   by its own spinlocks.
 * - Even if softirq is serialized, only local cpu is marked for
 *   execution. Hence, we get something sort of weak cpu binding.
 *   Though it is still not clear, will it result in better locality
 *   or will not.
 *
 *   Examples:
 *   - NET RX softirq. It is multithreaded and does not require
 *     any global serialization.
 *   - NET TX softirq. It kicks software netdevice queues, hence
 *     it is logically serialized per device, but this serialization
 *     is invisible to common code.
 *   - Tasklets: serialized wrt itself.
*/
/* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency * to the pending events, so lets the scheduler to balance * the softirq load for us.
*/ staticvoid wakeup_softirqd(void)
{ /* Interrupts are disabled: no need to stop preemption */ struct task_struct *tsk = __this_cpu_read(ksoftirqd);
/* * SOFTIRQ_OFFSET usage: * * On !RT kernels 'count' is the preempt counter, on RT kernels this applies * to a per CPU counter and to task::softirqs_disabled_cnt. * * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq * processing. * * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) * on local_bh_disable or local_bh_enable. * * This lets us distinguish between whether we are currently processing * softirq and whether we just have bh disabled.
*/ #ifdef CONFIG_PREEMPT_RT
/* * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a * softirq disabled section to be preempted. * * The per task counter is used for softirq_count(), in_softirq() and * in_serving_softirqs() because these counts are only valid when the task * holding softirq_ctrl::lock is running. * * The per CPU counter prevents pointless wakeups of ksoftirqd in case that * the task which is in a softirq disabled section is preempted or blocks.
*/ struct softirq_ctrl {
local_lock_t lock; int cnt;
};
/** * local_bh_blocked() - Check for idle whether BH processing is blocked * * Returns false if the per CPU softirq::cnt is 0 otherwise true. * * This is invoked from the idle task to guard against false positive * softirq pending warnings, which would happen when the task which holds * softirq_ctrl::lock was the only running task on the CPU and blocks on * some other lock.
*/ bool local_bh_blocked(void)
{ return __this_cpu_read(softirq_ctrl.cnt) != 0;
}
void __local_bh_disable_ip(unsignedlong ip, unsignedint cnt)
{ unsignedlong flags; int newcnt;
WARN_ON_ONCE(in_hardirq());
lock_map_acquire_read(&bh_lock_map);
/* First entry of a task into a BH disabled section? */ if (!current->softirq_disable_cnt) { if (preemptible()) {
local_lock(&softirq_ctrl.lock); /* Required to meet the RCU bottomhalf requirements. */
rcu_read_lock();
} else {
DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
}
}
/* * Track the per CPU softirq disabled state. On RT this is per CPU * state to allow preemption of bottom half disabled sections.
*/
newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt); /* * Reflect the result in the task state to prevent recursion on the * local lock and to make softirq_count() & al work.
*/
current->softirq_disable_cnt = newcnt;
/* * If this is not reenabling soft interrupts, no point in trying to * run pending ones.
*/ if (curcnt != cnt) goto out;
pending = local_softirq_pending(); if (!pending) goto out;
/* * If this was called from non preemptible context, wake up the * softirq daemon.
*/ if (!preempt_on) {
wakeup_softirqd(); goto out;
}
/* * Adjust softirq count to SOFTIRQ_OFFSET which makes * in_serving_softirq() become true.
*/
cnt = SOFTIRQ_OFFSET;
__local_bh_enable(cnt, false);
__do_softirq();
/* * Invoked from ksoftirqd_run() outside of the interrupt disabled section * to acquire the per CPU local lock for reentrancy protection.
*/ staticinlinevoid ksoftirqd_run_begin(void)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
local_irq_disable();
}
/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	/* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
	lock_map_release(&bh_lock_map);
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}
/*
 * Defer softirq processing to ksoftirqd.
 * NOTE(review): this variant appears to be inside the CONFIG_PREEMPT_RT
 * branch (softirqs always run in task context there) — confirm against the
 * original #ifdef structure, which was damaged in this copy.
 */
static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}
#define SCHED_SOFTIRQ_MASK BIT(SCHED_SOFTIRQ)
/* * flush_smp_call_function_queue() can raise a soft interrupt in a function * call. On RT kernels this is undesired and the only known functionalities * are in the block layer which is disabled on RT, and in the scheduler for * idle load balancing. If soft interrupts get raised which haven't been * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be * investigated.
*/ void do_softirq_post_smp_call_flush(unsignedint was_pending)
{ unsignedint is_pending = local_softirq_pending();
/* * This one is for softirq.c-internal use, where hardirqs are disabled * legitimately:
*/ #ifdef CONFIG_TRACE_IRQFLAGS void __local_bh_disable_ip(unsignedlong ip, unsignedint cnt)
{ unsignedlong flags;
WARN_ON_ONCE(in_hardirq());
raw_local_irq_save(flags); /* * The preempt tracer hooks into preempt_count_add and will break * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET * is set and before current->softirq_enabled is cleared. * We must manually increment preempt_count here and manually * call the trace_preempt_off later.
*/
__preempt_count_add(cnt); /* * Were softirqs turned off above:
*/ if (softirq_count() == (cnt & SOFTIRQ_MASK))
lockdep_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt)
trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
if (softirq_count() == (cnt & SOFTIRQ_MASK))
lockdep_softirqs_on(_RET_IP_);
__preempt_count_sub(cnt);
}
/* * Special-case - softirqs can safely be enabled by __do_softirq(), * without processing still-pending softirqs:
*/ void _local_bh_enable(void)
{
WARN_ON_ONCE(in_hardirq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsignedlong ip, unsignedint cnt)
{
WARN_ON_ONCE(in_hardirq());
lockdep_assert_irqs_enabled(); #ifdef CONFIG_TRACE_IRQFLAGS
local_irq_disable(); #endif /* * Are softirqs going to be turned on now:
*/ if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
lockdep_softirqs_on(ip); /* * Keep preemption disabled until we are done with * softirq processing:
*/
__preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) { /* * Run softirq if any pending. And do it in its own stack * as we may be calling this deep in a task call stack already.
*/
do_softirq();
}
static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
/* * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times, * but break the loop if need_resched() is set or after 2 ms. * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in * certain cases, such as stop_machine(), jiffies may cease to * increment and so we need the MAX_SOFTIRQ_RESTART limit as * well to make sure we eventually return from this method. * * These limits have been established via experimentation. * The two things to balance is latency against fairness - * we want to handle softirqs as soon as possible, but they * should not be able to lock up the box.
*/ #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) #define MAX_SOFTIRQ_RESTART 10
#ifdef CONFIG_TRACE_IRQFLAGS /* * When we run softirqs from irq_exit() and thus on the hardirq stack we need * to keep the lockdep irq context tracking as tight as possible in order to * not miss-qualify lock contexts and miss possible deadlocks.
*/
staticvoid handle_softirqs(bool ksirqd)
{ unsignedlong end = jiffies + MAX_SOFTIRQ_TIME; unsignedlong old_flags = current->flags; int max_restart = MAX_SOFTIRQ_RESTART; struct softirq_action *h; bool in_hardirq;
__u32 pending; int softirq_bit;
/* * Mask out PF_MEMALLOC as the current task context is borrowed for the * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC * again if the socket is related to swapping.
*/
current->flags &= ~PF_MEMALLOC;
/** * irq_enter_rcu - Enter an interrupt context with RCU watching
*/ void irq_enter_rcu(void)
{
__irq_enter_raw();
if (tick_nohz_full_cpu(smp_processor_id()) ||
(is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
tick_irq_enter();
account_hardirq_enter(current);
}
/** * irq_enter - Enter an interrupt context including RCU update
*/ void irq_enter(void)
{
ct_irq_enter();
irq_enter_rcu();
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) ||
	    tick_nohz_full_cpu(cpu)) {
		/* Only stop the tick when not nested inside a hardirq. */
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}
/** * irq_exit_rcu() - Exit an interrupt context without updating RCU * * Also processes softirqs if needed and possible.
*/ void irq_exit_rcu(void)
{
__irq_exit_rcu(); /* must be last! */
lockdep_hardirq_exit();
}
/** * irq_exit - Exit an interrupt context, update RCU and lockdep * * Also processes softirqs if needed and possible.
*/ void irq_exit(void)
{
__irq_exit_rcu();
ct_irq_exit(); /* must be last! */
lockdep_hardirq_exit();
}
/* * This function must run with irqs disabled!
*/ inlinevoid raise_softirq_irqoff(unsignedint nr)
{
__raise_softirq_irqoff(nr);
/* * If we're in an interrupt or softirq, we're done * (this also catches softirq-disabled code). We will * actually run the softirq once we return from * the irq or softirq. * * Otherwise we wake up ksoftirqd to make sure we * schedule the softirq soon.
*/ if (!in_interrupt() && should_wake_ksoftirqd())
wakeup_softirqd();
}
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when current preempted soft
			 * interrupt processing or prevents ksoftirqd from
			 * running. If the tasklet runs on a different CPU
			 * then this has no effect other than doing the BH
			 * disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif
void tasklet_kill(struct tasklet_struct *t)
{ if (in_interrupt())
pr_notice("Attempt to kill tasklet from interrupt\n");
staticvoid run_ksoftirqd(unsignedint cpu)
{
ksoftirqd_run_begin(); if (local_softirq_pending()) { /* * We can safely run softirq on inline stack, as we are not deep * in the task stack here.
*/
handle_softirqs(true);
ksoftirqd_run_end();
cond_resched(); return;
}
ksoftirqd_run_end();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.