/*
 * Weak default so architecture code can be converted gradually: an
 * architecture that has not yet wired up the generic signal path simply
 * does not override this hook, and the generic code falls through here.
 */
void __weak arch_do_signal_or_restart(struct pt_regs *regs)
{
}
/** * exit_to_user_mode_loop - do any pending work before leaving to user space * @regs: Pointer to pt_regs on entry stack * @ti_work: TIF work flags as read by the caller
*/
__always_inline unsignedlong exit_to_user_mode_loop(struct pt_regs *regs, unsignedlong ti_work)
{ /* * Before returning to user space ensure that all pending work * items have been completed.
*/ while (ti_work & EXIT_TO_USER_MODE_WORK) {
local_irq_enable_exit_to_user(ti_work);
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
if (ti_work & _TIF_UPROBE)
uprobe_notify_resume(regs);
if (ti_work & _TIF_PATCH_PENDING)
klp_update_patch_state(current);
if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
arch_do_signal_or_restart(regs);
if (ti_work & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
/* Architecture specific TIF work */
arch_exit_to_user_mode_work(regs, ti_work);
/* * Disable interrupts and reevaluate the work flags as they * might have changed while interrupts and preemption was * enabled above.
*/
local_irq_disable_exit_to_user();
/* Check if any of the above work has queued a deferred wakeup */
tick_nohz_user_enter_prepare();
ti_work = read_thread_flags();
}
/* Return the latest work state for arch_exit_to_user_mode() */ return ti_work;
}
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs); return ret;
}
/* * If this entry hit the idle task invoke ct_irq_enter() whether * RCU is watching or not. * * Interrupts can nest when the first interrupt invokes softirq * processing on return which enables interrupts. * * Scheduler ticks in the idle task can mark quiescent state and * terminate a grace period, if and only if the timer interrupt is * not nested into another interrupt. * * Checking for rcu_is_watching() here would prevent the nesting * interrupt to invoke ct_irq_enter(). If that nested interrupt is * the tick then rcu_flavor_sched_clock_irq() would wrongfully * assume that it is the first interrupt and eventually claim * quiescent state and end grace periods prematurely. * * Unconditionally invoke ct_irq_enter() so RCU state stays * consistent. * * TINY_RCU does not support EQS, so let the compiler eliminate * this part when enabled.
*/ if (!IS_ENABLED(CONFIG_TINY_RCU) &&
(is_idle_task(current) || arch_in_rcu_eqs())) { /* * If RCU is not watching then the same careful * sequence vs. lockdep and tracing is required * as in irqentry_enter_from_user_mode().
*/
lockdep_hardirqs_off(CALLER_ADDR0);
ct_irq_enter();
instrumentation_begin();
kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
instrumentation_end();
ret.exit_rcu = true; return ret;
}
/* * If RCU is watching then RCU only wants to check whether it needs * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick() * already contains a warning when RCU is not watching, so no point * in having another one here.
*/
lockdep_hardirqs_off(CALLER_ADDR0);
instrumentation_begin();
kmsan_unpoison_entry_regs(regs);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
instrumentation_end();
return ret;
}
/*
 * Reschedule on interrupt exit to kernel mode, provided preemption is
 * currently allowed (preempt_count() == 0) and rescheduling is needed.
 */
void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
/* Static-key variant: the branch toggles the cond_resched at runtime. */
void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif
/* Check whether this returns to user mode */ if (user_mode(regs)) {
irqentry_exit_to_user_mode(regs);
} elseif (!regs_irqs_disabled(regs)) { /* * If RCU was not watching on entry this needs to be done * carefully and needs the same ordering of lockdep/tracing * and RCU as the return to user mode path.
*/ if (state.exit_rcu) {
instrumentation_begin(); /* Tell the tracer that IRET will enable interrupts */
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
instrumentation_end();
ct_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0); return;
}
instrumentation_begin(); if (IS_ENABLED(CONFIG_PREEMPTION))
irqentry_exit_cond_resched();
/* Covers both tracing and lockdep */
trace_hardirqs_on();
instrumentation_end();
} else { /* * IRQ flags state is correct already. Just tell RCU if it * was not watching on entry.
*/ if (state.exit_rcu)
ct_irq_exit();
}
}
/*
 * NOTE(review): the following trailing text is German website boilerplate
 * that was appended during extraction and is not part of this source file.
 * Translation: "The information on this website has been carefully compiled
 * to the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the colored syntax rendering and the measurement are still
 * experimental."
 */