/* * Upper bound sanity check. If the backwards conversion is * not equal latch, we know that the above shift overflowed.
*/ if ((clc >> evt->shift) != (u64)latch)
clc = ~0ULL;
/* * Scaled math oddities: * * For mult <= (1 << shift) we can safely add mult - 1 to * prevent integer rounding loss. So the backwards conversion * from nsec to device ticks will be correct. * * For mult > (1 << shift), i.e. device frequency is > 1GHz we * need to be careful. Adding mult - 1 will result in a value * which when converted back to device ticks can be larger * than latch by up to (mult - 1) >> shift. For the min_delta * calculation we still want to apply this in order to stay * above the minimum device ticks limit. For the upper limit * we would end up with a latch value larger than the upper * limit of the device, so we omit the add to stay below the * device upper boundary. * * Also omit the add if it would overflow the u64 boundary.
*/ if ((~0ULL - clc > rnd) &&
(!ismax || evt->mult <= (1ULL << evt->shift)))
clc += rnd;
do_div(clc, evt->mult);
/* Deltas less than 1usec are pointless noise */ return clc > 1000 ? clc : 1000;
}
/** * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds * @latch: value to convert * @evt: pointer to clock event device descriptor * * Math helper, returns latch value converted to nanoseconds (bound checked)
*/
u64 clockevent_delta2ns(unsignedlong latch, struct clock_event_device *evt)
{ return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
/* Transition with new state-specific callbacks */ switch (state) { case CLOCK_EVT_STATE_DETACHED: /* The clockevent device is getting replaced. Shut it down. */
case CLOCK_EVT_STATE_SHUTDOWN: if (dev->set_state_shutdown) return dev->set_state_shutdown(dev); return 0;
case CLOCK_EVT_STATE_PERIODIC: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) return -ENOSYS; if (dev->set_state_periodic) return dev->set_state_periodic(dev); return 0;
case CLOCK_EVT_STATE_ONESHOT: /* Core internal bug */ if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return -ENOSYS; if (dev->set_state_oneshot) return dev->set_state_oneshot(dev); return 0;
case CLOCK_EVT_STATE_ONESHOT_STOPPED: /* Core internal bug */ if (WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
clockevent_get_state(dev))) return -EINVAL;
if (dev->set_state_oneshot_stopped) return dev->set_state_oneshot_stopped(dev); else return -ENOSYS;
default: return -ENOSYS;
}
}
/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	/* Nothing to do if the device is already in the requested state */
	if (clockevent_get_state(dev) == state)
		return;

	/* If the driver callback fails, keep the recorded state unchanged */
	if (__clockevents_switch_state(dev, state))
		return;

	clockevent_set_state(dev, state);

	/*
	 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
	 * on it, so fix it up and emit a warning:
	 */
	if (clockevent_state_oneshot(dev) && WARN_ON(!dev->mult))
		dev->mult = 1;
}
/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 *
 * Switches the device into SHUTDOWN state and invalidates the pending
 * expiry by setting next_event to KTIME_MAX.
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
}
/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 *
 * Returns 0 when the device has no tick_resume() callback, otherwise
 * whatever the callback returns.
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	/* Devices without a resume callback succeed trivially */
	if (!dev->tick_resume)
		return 0;

	return dev->tick_resume(dev);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
/* Limit min_delta to a jiffy */ #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
/** * clockevents_increase_min_delta - raise minimum delta of a clock event device * @dev: device to increase the minimum delta * * Returns 0 on success, -ETIME when the minimum delta reached the limit.
*/ staticint clockevents_increase_min_delta(struct clock_event_device *dev)
{ /* Nothing to do if we already reached the limit */ if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
printk_deferred(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
dev->next_event = KTIME_MAX; return -ETIME;
}
/** * clockevents_program_min_delta - Set clock event device to the minimum delay. * @dev: device to program * * Returns 0 on success, -ETIME when the retry loop failed.
*/ staticint clockevents_program_min_delta(struct clock_event_device *dev)
/*
 * NOTE(review): as shown, `clc` is declared but never used, and the
 * conversion of min_delta_ns to device ticks plus the dev->set_next_event()
 * call that would let this loop return 0 on success are not visible.
 * This looks like text lost in extraction — the loop as written can only
 * exit via the -ETIME path. Confirm against the original source before
 * relying on this body.
 */
{ unsignedlonglong clc;
int64_t delta; int i;
/* Retry programming the minimum delta, widening it when needed */
for (i = 0;;) {
/* Start from the device's current minimum delta in nanoseconds */
delta = dev->min_delta_ns;
/* Record the intended expiry relative to the current monotonic time */
dev->next_event = ktime_add_ns(ktime_get(), delta);
if (++i > 2) { /* * We tried 3 times to program the device with the * given min_delta_ns. Try to increase the minimum * delta, if that fails as well get out of here.
*/ if (clockevents_increase_min_delta(dev)) return -ETIME;
/* Minimum delta was raised; restart the 3-attempt counter */
i = 0;
}
}
}
#else/* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
/** * clockevents_program_min_delta - Set clock event device to the minimum delay. * @dev: device to program * * Returns 0 on success, -ETIME when the retry loop failed.
*/ staticint clockevents_program_min_delta(struct clock_event_device *dev)
{ unsignedlonglong clc;
int64_t delta = 0; int i;
for (i = 0; i < 10; i++) {
delta += dev->min_delta_ns;
dev->next_event = ktime_add_ns(ktime_get(), delta);
/** * clockevents_program_event - Reprogram the clock event device. * @dev: device to program * @expires: absolute expiry time (monotonic clock) * @force: program minimum delay if expires can not be set * * Returns 0 on success, -ETIME when the event is in the past.
*/ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force)
{ unsignedlonglong clc;
int64_t delta; int rc;
if (WARN_ON_ONCE(expires < 0)) return -ETIME;
dev->next_event = expires;
if (clockevent_state_shutdown(dev)) return 0;
/* We must be in ONESHOT state here */
WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
clockevent_get_state(dev));
/* Shortcut for clockevent devices that can deal with ktime. */ if (dev->features & CLOCK_EVT_FEAT_KTIME) return dev->set_next_ktime(expires, dev);
delta = ktime_to_ns(ktime_sub(expires, ktime_get())); if (delta <= 0) return force ? clockevents_program_min_delta(dev) : -ETIME;
/*
 * Called after a clockevent has been added which might
 * have replaced a current regular or broadcast device. A
 * released normal device might be a suitable replacement
 * for the current broadcast device. Similarly a released
 * broadcast device might be a suitable replacement for a
 * normal device.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *ced;

	/*
	 * Keep pulling devices off the released list; each call to
	 * tick_check_new_device() may release another device, so loop
	 * until the list is drained.
	 */
	while (!list_empty(&clockevents_released)) {
		ced = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_move(&ced->list, &clockevent_devices);
		tick_check_new_device(ced);
	}
}
if (!tick_check_replacement(newdev, dev)) continue;
if (!try_module_get(dev->owner)) continue;
if (newdev)
module_put(newdev->owner);
newdev = dev;
} if (newdev) {
tick_install_replacement(newdev);
list_del_init(&ced->list);
} return newdev ? 0 : -EBUSY;
}
/* * Called with clockevents_mutex and clockevents_lock held
*/ staticint __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{ /* Fast track. Device is unused */ if (clockevent_state_detached(ced)) {
list_del_init(&ced->list); return 0;
}
/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *req = arg;
	int ret;

	raw_spin_lock(&clockevents_lock);

	ret = __clockevents_try_unbind(req->ce, smp_processor_id());
	/* -EAGAIN: device is in use, try to install a replacement first */
	if (ret == -EAGAIN)
		ret = clockevents_replace(req->ce);

	req->res = ret;
	raw_spin_unlock(&clockevents_lock);
}
/* * Issues smp function call to unbind a per cpu device. Called with * clockevents_mutex held.
*/ staticint clockevents_unbind(struct clock_event_device *ced, int cpu)
{ struct ce_unbind cu = { .ce = ced, .res = -ENODEV };
if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return;
/* * Calculate the maximum number of seconds we can sleep. Limit * to 10 minutes for hardware which can program more than * 32bit ticks so we still get reasonable conversion values.
*/
sec = dev->max_delta_ticks;
do_div(sec, freq); if (!sec)
sec = 1; elseif (sec > 600 && dev->max_delta_ticks > UINT_MAX)
sec = 600;
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
/*
 * Reconfigure the device for @freq and reprogram it according to its
 * current state. Returns 0 on success, a negative error code otherwise.
 */
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	/* Oneshot: re-arm the pending event with the new mult/shift */
	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	/* Periodic: let the state callback pick up the new frequency */
	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}
/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	/* -ENODEV: @dev is not the broadcast device, update it directly */
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}
/* * Noop handler when we shut down an event device
 */ void clockevents_handle_noop(struct clock_event_device *dev)
{
	/* Intentionally empty: installed so late interrupts are ignored */
}
/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_move(&old->list, &clockevents_released);
	}

	if (!new)
		return;

	/* The requested device must currently be detached */
	BUG_ON(!clockevent_state_detached(new));
	clockevents_shutdown(new);
}
list_for_each_entry(dev, &clockevent_devices, list) if (dev->resume && !clockevent_state_detached(dev))
dev->resume(dev);
}
#ifdef CONFIG_HOTPLUG_CPU
/** * tick_offline_cpu - Shutdown all clock events related * to this CPU and take it out of the * broadcast mechanism. * @cpu: The outgoing CPU * * Called by the dying CPU during teardown.
*/ void tick_offline_cpu(unsignedint cpu)
{ struct clock_event_device *dev, *tmp;
raw_spin_lock(&clockevents_lock);
tick_broadcast_offline(cpu);
tick_shutdown();
/* * Unregister the clock event devices which were * released above.
*/
list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
list_del(&dev->list);
/* * Now check whether the CPU has left unused per cpu devices
*/
list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { if (cpumask_test_cpu(cpu, dev->cpumask) &&
cpumask_weight(dev->cpumask) == 1 &&
!tick_is_broadcast_device(dev)) {
BUG_ON(!clockevent_state_detached(dev));
list_del(&dev->list);
}
}
/* We don't support the abomination of removable broadcast devices */
static ssize_t unbind_device_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct clock_event_device *found = NULL, *ced;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);

	/* Look up the device by name and try the lock-protected fast path */
	list_for_each_entry(ced, &clockevent_devices, list) {
		if (strcmp(ced->name, name))
			continue;
		ret = __clockevents_try_unbind(ced, dev->id);
		found = ced;
		break;
	}

	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so 'found' can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(found, dev->id);
	mutex_unlock(&clockevents_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_device);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.