// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API, aka timer wheel,
 *  hrtimers provide finer resolution and accuracy depending on system
 *  configuration and capabilities.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	Based on the original timer wheel code
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
*/
/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
						    unsigned long *flags)
__acquires(&timer->base->lock)
{
	struct hrtimer_clock_base *base;
for (;;) {
		base = READ_ONCE(timer->base);
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
}
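/*
 * Illustrative sketch (example only, not part of this excerpt): the
 * counterpart which callers pair with lock_hrtimer_base() simply drops the
 * cpu_base lock again, roughly:
 */
#if 0	/* example only */
static inline void unlock_hrtimer_base(const struct hrtimer *timer,
				       unsigned long *flags)
	__releases(&timer->base->cpu_base->lock)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
#endif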
/*
 * Check if the elected target is suitable considering its next
 * event and the hotplug state of the current CPU.
 *
 * If the elected target is remote and its next event is after the timer
 * to queue, then a remote reprogram is necessary. However there is no
 * guarantee the IPI handling the operation would arrive in time to meet
 * the high resolution deadline. In this case the local CPU becomes a
 * preferred target, unless it is offline.
 *
 * High and low resolution modes are handled the same way for simplicity.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static bool hrtimer_suitable_target(struct hrtimer *timer,
				    struct hrtimer_clock_base *new_base,
				    struct hrtimer_cpu_base *new_cpu_base,
				    struct hrtimer_cpu_base *this_cpu_base)
{
ktime_t expires;
	/*
	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
	 * guarantees it is online.
	 */
	if (new_cpu_base == this_cpu_base)
		return true;

	/*
	 * The offline local CPU can't be the default target if the
	 * next remote target event is after this timer. Keep the
	 * elected new base. An IPI will be issued to reprogram
	 * it as a last resort.
	 */
	if (!hrtimer_base_is_online(this_cpu_base))
		return true;
static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
							int pinned)
{
	if (!hrtimer_base_is_online(base)) {
		int cpu = cpumask_any_and(cpu_online_mask,
					  housekeeping_cpumask(HK_TYPE_TIMER));
/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;
/* See the comment in lock_hrtimer_base() */
WRITE_ONCE(timer->base, &migration_base);
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
sft++;
div >>= 1;
}
tmp >>= sft;
	do_div(tmp, (u32) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG >= 64 */
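/*
 * Illustrative sketch (example only, not part of this file): on 64-bit
 * builds the ktime_divns() wrapper in <linux/ktime.h> does not need the
 * shift and do_div() dance above and essentially reduces to a plain
 * 64-bit division, roughly:
 */
#if 0	/* example only */
static inline s64 ktime_divns_example(const ktime_t kt, s64 div)
{
	/* Negative divisors are rejected, as in the 32-bit path above. */
	WARN_ON(div < 0);
	return kt / div;
}
#endif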
/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
ktime_t res = ktime_add_unsafe(lhs, rhs);
	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}
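/*
 * Illustrative sketch (example only): without the check above, adding two
 * large positive ktime values would wrap negative; ktime_add_safe() clamps
 * the result instead.
 */
#if 0	/* example only */
	ktime_t a = ktime_set(KTIME_SEC_MAX / 2 + 1, 0);
	ktime_t b = ktime_set(KTIME_SEC_MAX / 2 + 1, 0);
	ktime_t sum = ktime_add_safe(a, b);	/* clamped to KTIME_SEC_MAX seconds */
#endif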
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
		fallthrough;
	default:
		return false;
}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;
next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		if (timer == exclude) {
			/* Get to the next timer in the queue. */
			next = timerqueue_iterate_next(next);
			if (!next)
				continue;
		/* Skip cpu_base update if a timer is being excluded. */
		if (exclude)
			continue;
if (timer->is_soft)
			cpu_base->softirq_next_timer = timer;
		else
cpu_base->next_timer = timer;
}
	}

	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;

	return expires_next;
}
/*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next
 * but does not set cpu_base::*expires_next, that is done by
 * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
 * cpu_base::*expires_next right away, reprogramming logic would no longer
 * work.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
 * those timers will get run whenever the softirq gets handled, at the end of
 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
 *
 * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
 * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
 * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
 *
 * @active_mask must be one of:
 *  - HRTIMER_ACTIVE_ALL,
 *  - HRTIMER_ACTIVE_SOFT, or
 *  - HRTIMER_ACTIVE_HARD.
 */
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
	unsigned int active;
	struct hrtimer *next_timer = NULL;
ktime_t expires_next = KTIME_MAX;
	/*
	 * If the soft interrupt has already been activated, ignore the
	 * soft bases. They will be handled in the already raised soft
	 * interrupt.
	 */
	if (!cpu_base->softirq_activated) {
		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
		/*
		 * Update the soft expiry time. clock_settime() might have
		 * affected it.
		 */
cpu_base->softirq_expires_next = soft;
}
	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);

	/*
	 * If a softirq timer is expiring first, update cpu_base->next_timer
	 * and program the hardware with the soft expiry time.
	 */
	if (expires_next > soft) {
cpu_base->next_timer = cpu_base->softirq_next_timer;
expires_next = soft;
}
	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;
tick_program_event(expires_next, 1);
}
/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}
/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (tick_init_highres()) {
pr_warn("Could not switch to high resolution mode on CPU %u\n",
			base->cpu);
		return;
}
base->hres_active = 1;
hrtimer_resolution = HIGH_RES_NSEC;
	tick_setup_sched_timer(true);

	/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
}
#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Retrigger next event is called after clock was set with interrupts
 * disabled through an SMP function call or directly from low level
 * resume code.
 *
 * This is only invoked when:
 *	- CONFIG_HIGH_RES_TIMERS is enabled.
 *	- CONFIG_NOHZ_COMMON is enabled
 *
 * For the other cases this function is empty and because the call sites
 * are optimized out it vanishes as well, i.e. no need for lots of
 * #ifdeffery.
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * When high resolution mode or nohz is active, then the offsets of
	 * CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the
	 * next tick will take care of that.
	 *
	 * If high resolution mode is active then the next expiring timer
	 * must be reevaluated and the clock event device reprogrammed if
	 * necessary.
	 *
	 * In the NOHZ case the update of the offset and the reevaluation
	 * of the next expiring timer is enough. The return from the SMP
	 * function call will take care of the reprogramming in case the
	 * CPU was in a NOHZ idle sleep.
	 *
	 * In periodic low resolution mode, the next softirq expiration
	 * must also be updated.
	 */
raw_spin_lock(&base->lock);
	hrtimer_update_base(base);

	if (hrtimer_hres_active(base))
		hrtimer_force_reprogram(base, 0);
	else
hrtimer_update_next_event(base);
raw_spin_unlock(&base->lock);
}
/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *base = timer->base;
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
expires = 0;
	if (timer->is_soft) {
		/*
		 * soft hrtimer could be started on a remote CPU. In this
		 * case softirq_expires_next needs to be updated on the
		 * remote CPU. The soft hrtimer will not expire before the
		 * first hard hrtimer on the remote CPU -
		 * hrtimer_check_target() prevents this case.
		 */
		struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;

		if (timer_cpu_base->softirq_activated)
			return;

		if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
			return;

		if (!ktime_before(expires, timer_cpu_base->expires_next) ||
		    !reprogram)
			return;
}
	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other CPU's clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	if (expires >= cpu_base->expires_next)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will reevaluate the
	 * clock bases and reprogram the clock event device.
	 */
	if (cpu_base->in_hrtirq)
		return;
	/*
	 * Update the base offsets unconditionally so the following
	 * checks whether the SMP function call is required works.
	 *
	 * The update is safe even when the remote CPU is in the hrtimer
	 * interrupt or the hrtimer soft interrupt and expiring affected
	 * bases. Either it will see the update before handling a base or
	 * it will see it when it finishes the processing and reevaluates
	 * the next expiring timer.
	 */
seq = cpu_base->clock_was_set_seq;
hrtimer_update_base(cpu_base);
	/*
	 * If the sequence did not change over the update then the
	 * remote CPU already handled it.
	 */
	if (seq == cpu_base->clock_was_set_seq)
		return false;

	/*
	 * If the remote CPU is currently handling an hrtimer interrupt, it
	 * will reevaluate the first expiring timer of all clock bases
	 * before reprogramming. Nothing to do here.
	 */
	if (cpu_base->in_hrtirq)
		return false;

	/*
	 * Walk the affected clock bases and check whether the first expiring
	 * timer in a clock base is moving ahead of the first expiring timer of
	 * @cpu_base. If so, the IPI must be invoked because per CPU clock
	 * event devices cannot be remotely reprogrammed.
	 */
active &= cpu_base->active_bases;
next = timerqueue_getnext(&base->active);
		expires = ktime_sub(next->expires, base->offset);
		if (expires < cpu_base->expires_next)
			return true;

		/* Extra check for softirq clock bases */
		if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
			continue;
		if (cpu_base->softirq_activated)
			continue;
		if (expires < cpu_base->softirq_expires_next)
			return true;
	}
	return false;
}
/*
 * Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and
 * CLOCK_BOOTTIME (for late sleep time injection).
 *
 * This requires updating the offsets for these clocks
 * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, then this
 * also requires eventually reprogramming the per CPU clock event devices
 * when the change moves an affected timer ahead of the first expiring
 * timer on that CPU. Obviously remote per CPU clock event devices cannot
 * be reprogrammed. The other reason why an IPI has to be sent is when the
 * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
 * in the tick, which obviously might be stopped, so this has to bring out
 * the remote CPU which might sleep in idle to get this sorted.
 */
void clock_was_set(unsigned int bases)
{
	struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
	cpumask_var_t mask;
	int cpu;

	if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
		goto out_timerfd;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		on_each_cpu(retrigger_next_event, NULL, 1);
		goto out_timerfd;
}
/* Avoid interrupting CPUs if possible */
cpus_read_lock();
	for_each_online_cpu(cpu) {
		unsigned long flags;
/*
 * Called from timekeeping code to reprogram the hrtimer interrupt device
 * on all cpus and to notify timerfd.
 */
void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
/*
 * Called during resume either directly via timekeeping_resume()
 * or in the case of s2idle from tick_unfreeze() to ensure that the
 * hrtimers are up to date.
 */
void hrtimers_resume_local(void)
{
	lockdep_assert_irqs_disabled();

	/* Retrigger on the local CPU */
retrigger_next_event(NULL);
}
/**
 * hrtimer_forward() - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 *
 * .. note::
 *  This only updates the timer expiry value and does not requeue the timer.
 *
 * There is also a variant of the function hrtimer_forward_now().
 *
 * Context: Can be safely called from the callback function of @timer. If called
 *          from other contexts @timer must neither be enqueued nor running the
 *          callback and the caller needs to take care of serialization.
 *
 * Return: The number of overruns is returned.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
u64 orun = 1;
ktime_t delta;
	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;
if (interval < hrtimer_resolution)
interval = hrtimer_resolution;
if (unlikely(delta >= interval)) {
s64 incr = ktime_to_ns(interval);
orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
orun++;
}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
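/*
 * Illustrative sketch (example only), assuming a driver-private "my_period"
 * interval: a periodic callback typically forwards its own expiry past the
 * current time with hrtimer_forward_now() (the convenience variant of
 * hrtimer_forward()) and asks to be restarted.
 */
#if 0	/* example only */
static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
{
	u64 overruns = hrtimer_forward_now(t, my_period);

	if (overruns > 1)
		pr_debug("missed %llu periods\n", overruns - 1);

	return HRTIMER_RESTART;
}
#endif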
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns true when the new timer is the leftmost timer in the tree.
 */
static bool enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base,
			    enum hrtimer_mode mode)
{
debug_activate(timer, mode);
WARN_ON_ONCE(!base->cpu_base->online);
base->cpu_base->active_bases |= 1 << base->index;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;
/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, newstate);

	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst thing that can happen is
	 * a superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart, bool keep_local)
{
u8 state = timer->state;
	if (state & HRTIMER_STATE_ENQUEUED) {
		bool reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
debug_deactivate(timer);
reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
		/*
		 * If the timer is not restarted then reprogramming is
		 * required if the timer is local. If it is local and about
		 * to be restarted, avoid programming it twice (on removal
		 * and a moment later when it's requeued).
		 */
		if (!restart)
			state = HRTIMER_STATE_INACTIVE;
		else
reprogram &= !keep_local;
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffy) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}
	/*
	 * Find the next SOFT expiration.
	 */
expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
	/*
	 * reprogramming needs to be triggered, even if the next soft
	 * hrtimer expires at the same time as the next hard
	 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
	 */
	if (expires == KTIME_MAX)
		return;
	/*
	 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
	 * cpu_base->*expires_next is only set by hrtimer_reprogram()
	 */
hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
}
	/*
	 * If the timer is on the local cpu base and is the first expiring
	 * timer then this might end up reprogramming the hardware twice
	 * (on removal and on enqueue). To avoid that, prevent the
	 * reprogram on removal: keep the timer local to the current CPU
	 * and enforce reprogramming after it is queued no matter whether
	 * it is the new first expiring timer again or not.
	 */
force_local = base->cpu_base == this_cpu_base;
force_local &= base->cpu_base->next_timer == timer;
	/*
	 * Don't force local queuing if this enqueue happens on an unplugged
	 * CPU after hrtimer_cpu_dying() has been invoked.
	 */
force_local &= this_cpu_base->online;
	/*
	 * Remove an active timer from the queue. In case it is not queued
	 * on the current CPU, make sure that remove_hrtimer() updates the
	 * remote data correctly.
	 *
	 * If it's on the current CPU and the first expiring timer, then
	 * skip reprogramming, keep the timer local and enforce
	 * reprogramming later if it was the first expiring timer. This
	 * avoids programming the underlying clock event twice (once at
	 * removal and once after enqueue).
	 */
remove_hrtimer(timer, base, true, force_local);
if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, base->get_time());
	/* Switch the timer base, if necessary: */
	if (!force_local) {
new_base = switch_hrtimer_base(timer, base,
mode & HRTIMER_MODE_PINNED);
} else {
new_base = base;
}
	first = enqueue_hrtimer(timer, new_base, mode);
	if (!force_local) {
		/*
		 * If the current CPU base is online, then the timer is
		 * never queued on a remote CPU if it would be the first
		 * expiring timer there.
		 */
		if (hrtimer_base_is_online(this_cpu_base))
			return first;

		/*
		 * Timer was enqueued remote because the current base is
		 * already offline. If the timer is the first to expire,
		 * kick the remote CPU to reprogram the clock event.
		 */
		if (first) {
			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
	/*
	 * Timer was forced to stay on the current CPU to avoid
	 * reprogramming on removal and enqueue. Force reprogram the
	 * hardware by evaluating the new first expiring timer.
	 */
	hrtimer_force_reprogram(new_base->cpu_base, 1);
	return 0;
}
/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	/*
	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
	 * expiry mode because unmarked timers are moved to softirq expiry.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
	else
WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
base = lock_hrtimer_base(timer, &flags);
if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
		hrtimer_reprogram(timer, true);

	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
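/*
 * Illustrative sketch (example only), assuming "my_timer" was set up with
 * hrtimer_setup() elsewhere: arm it 10ms from now with 1ms of slack. The
 * hrtimer_start() inline is simply this function with delta_ns == 0.
 */
#if 0	/* example only */
static void my_arm_with_slack(struct hrtimer *my_timer)
{
	hrtimer_start_range_ns(my_timer, ms_to_ktime(10),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}
#endif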
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *       cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;
base = lock_hrtimer_base(timer, &flags);
if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/*
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
 * the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
				      unsigned long flags)
{
	if (atomic_read(&cpu_base->timer_waiters)) {
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
spin_unlock(&cpu_base->softirq_expiry_lock);
spin_lock(&cpu_base->softirq_expiry_lock);
raw_spin_lock_irq(&cpu_base->lock);
}
}
/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion: if the soft irq thread is preempted
 * in the middle of a timer callback, then calling hrtimer_cancel() can
 * lead to two issues:
 *
 *  - If the caller is on a remote CPU then it has to spin wait for the timer
 *    handler to complete. This can result in unbounded priority inversion.
 *
 *  - If the caller originates from the task which preempted the timer
 *    handler on the same CPU, then spin waiting for the timer handler to
 *    complete is never going to end.
 */
void hrtimer_cancel_wait_running(const struct hrtimer *timer)
{
	/* Lockless read. Prevent the compiler from reloading it below */
	struct hrtimer_clock_base *base = READ_ONCE(timer->base);

	/*
	 * Just relax if the timer expires in hard interrupt context or if
	 * it is currently on the migration base.
	 */
	if (!timer->is_soft || is_migration_base(base)) {
		cpu_relax();
		return;
}
	/*
	 * Mark the base as contended and grab the expiry lock, which is
	 * held by the softirq across the timer callback. Drop the lock
	 * immediately so the softirq can expire the next timer. In theory
	 * the timer could already be running again, but that's more than
	 * unlikely and just causes another wait loop.
	 */
atomic_inc(&base->cpu_base->timer_waiters);
spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
atomic_dec(&base->cpu_base->timer_waiters);
spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
}
#else
static inline void
hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
					     unsigned long flags) { }
#endif
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;
do {
ret = hrtimer_try_to_cancel(timer);
if (ret < 0)
hrtimer_cancel_wait_running(timer);
	} while (ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
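/*
 * Illustrative sketch (example only): teardown paths that may wait use
 * hrtimer_cancel(); contexts which must not wait use hrtimer_try_to_cancel()
 * and deal with the -1 "callback running" case themselves.
 */
#if 0	/* example only */
static void my_device_teardown(struct hrtimer *my_timer)
{
	/* Afterwards the callback is neither queued nor running. */
	hrtimer_cancel(my_timer);
}
#endif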
/**
 * __hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
ktime_t rem;
	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude:	timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
	if (hrtimer_hres_active(cpu_base)) {
		unsigned int active;
if (!cpu_base->softirq_activated) {
active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
expires = __hrtimer_next_event_base(cpu_base, exclude,
active, KTIME_MAX);
}
active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
expires = __hrtimer_next_event_base(cpu_base, exclude, active,
expires);
}
	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context for latency reasons and because the callbacks
	 * can invoke functions which might sleep on RT, e.g. spin_lock().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
softtimer = true;
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they need to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
/**
 * hrtimer_setup - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @function:	the callback function
 * @clock_id:	the clock to be used
 * @mode:	The modes which are relevant for initialization:
 *		HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *		HRTIMER_MODE_REL_SOFT
 *
 *		The PINNED variants of the above can be handed in,
 *		but the PINNED bit is ignored as pinning happens
 *		when the hrtimer is started
 */
void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t clock_id, enum hrtimer_mode mode)
{
debug_setup(timer, clock_id, mode);
__hrtimer_setup(timer, function, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_setup);
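/*
 * Illustrative sketch (example only), assuming driver-private "my_timer"
 * and "my_cb" names: set up a monotonic timer, arm it relative to now and
 * let the callback decide whether to requeue.
 */
#if 0	/* example only */
static struct hrtimer my_timer;

static enum hrtimer_restart my_cb(struct hrtimer *t)
{
	/* One-shot: do the work, do not requeue. */
	return HRTIMER_NORESTART;
}

static void my_init_and_arm(void)
{
	hrtimer_setup(&my_timer, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}
#endif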
/**
 * hrtimer_setup_on_stack - initialize a timer on stack memory
 * @timer:	The timer to be initialized
 * @function:	the callback function
 * @clock_id:	The clock to be used
 * @mode:	The timer mode
 *
 * Similar to hrtimer_setup(), except that this one must be used if struct hrtimer is in stack
 * memory.
 */
void hrtimer_setup_on_stack(struct hrtimer *timer,
			    enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t clock_id, enum hrtimer_mode mode)
{
debug_setup_on_stack(timer, clock_id, mode);
__hrtimer_setup(timer, function, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_setup_on_stack);
/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;
do {
base = READ_ONCE(timer->base);
seq = raw_read_seqcount_begin(&base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;
} while (read_seqcount_retry(&base->seq, seq) ||
base != READ_ONCE(timer->base));
	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);
/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being run
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
raw_write_seqcount_barrier(&base->seq);
	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
timer->is_rel = false;
	/*
	 * The timer is marked as running in the CPU base, so it is
	 * protected against migration to a different CPU even if the lock
	 * is dropped.
	 */
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
trace_hrtimer_expire_entry(timer, now);
expires_in_hardirq = lockdep_hrtimer_enter(timer);
	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running.timer == NULL &&
	 * timer->state == INACTIVE.
	 */
raw_write_seqcount_barrier(&base->seq);
	while ((node = timerqueue_getnext(&base->active))) {
		struct hrtimer *timer;
timer = container_of(node, struct hrtimer, node);
		/*
		 * The immediate goal for using the softexpires is
		 * minimizing wakeups, not running timers at the
		 * earliest interrupt after their soft expiration.
		 * This allows us to avoid using a Priority Search
		 * Tree, which can answer a stabbing query for
		 * overlapping intervals and instead use the simple
		 * BST we already have.
		 * We don't add extra wakeups by delaying timers that
		 * are right-of a not yet expired timer, because that
		 * timer will have to trigger a wakeup anyway.
		 */
		if (basenow < hrtimer_get_softexpires_tv64(timer))
			break;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
cpu_base->expires_next = KTIME_MAX;
/* Reevaluate the clock bases for the [soft] next expiry */
	expires_next = hrtimer_update_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
tick_program_event(expires_next, 1);
pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
ktime_t now;
	if (hrtimer_hres_active(cpu_base))
		return;
	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
}
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
/**
 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
 * @sl:		sleeper to be started
 * @mode:	timer mode abs/rel
 *
 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
 */
void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode)
{
	/*
	 * Make the enqueue delivery mode check work on RT. If the sleeper
	 * was initialized for hard interrupt delivery, force the mode bit.
	 * This is a special case for hrtimer_sleepers because
	 * __hrtimer_setup_sleeper() determines the delivery mode on RT so the
	 * fiddling with this decision is avoided at the call sites.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
		mode |= HRTIMER_MODE_HARD;

	hrtimer_start_expires(&sl->timer, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
static void __hrtimer_setup_sleeper(struct hrtimer_sleeper *sl,
clockid_t clock_id, enum hrtimer_mode mode)
{
	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context either for latency reasons or because the
	 * hrtimer callback takes regular spinlocks or invokes other
	 * functions which are not suitable for hard interrupt context on
	 * PREEMPT_RT.
	 *
	 * The hrtimer_sleeper callback is RT compatible in hard interrupt
	 * context, but there is a latency concern: Untrusted userspace can
	 * spawn many threads which arm timers for the same expiry time on
	 * the same CPU. That causes a latency spike due to the wakeup of
	 * a gazillion threads.
	 *
	 * OTOH, privileged real-time user space applications rely on the
	 * low latency of hard interrupt wakeups. If the current task is in
	 * a real-time scheduling class, mark the mode for hard interrupt
	 * expiry.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
mode |= HRTIMER_MODE_HARD;
}
/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
int hrtimers_cpu_starting(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
/* Clear out any left over state from a CPU down operation */
cpu_base->active_bases = 0;
cpu_base->hres_active = 0;
cpu_base->hang_detected = 0;
cpu_base->next_timer = NULL;
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->online = 1; return 0;
}
		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
}
}
int hrtimers_cpu_dying(unsigned int dying_cpu)
{
	int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER));
	struct hrtimer_cpu_base *old_base, *new_base;