/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;

		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
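
/*
 * Worked example (illustrative sketch, not part of this file): converting
 * a hypothetical 19.2MHz counter to nanoseconds over a 600 second range,
 * i.e. clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600):
 *
 *	maxsec * from = 600 * 19200000 = 11520000000, which needs 34 bits,
 *	so the first loop runs twice and sftacc ends up as 30, meaning
 *	mult must fit into 30 bits.
 *
 *	The second loop settles on shift = 24, mult = 873813333
 *	(0x34155555), since 873813333 < 2^30 while the sft = 25 candidate
 *	is not.
 *
 *	One counter cycle then converts to (1 * 873813333) >> 24 ~= 52ns,
 *	matching the 52.083ns period of a 19.2MHz clock.
 */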
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;
/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 *
 * The default of 500 parts per million is based on NTP's limits.
 * If a clocksource is good enough for NTP, it is good enough for us!
 *
 * In other words, by default, even if a clocksource is extremely
 * precise (for example, with a sub-nanosecond period), the maximum
 * permissible skew between the clocksource watchdog and the clocksource
 * under test is not permitted to go below the 500ppm minimum defined
 * by MAX_SKEW_USEC. This 500ppm minimum may be overridden using the
 * CLOCKSOURCE_WATCHDOG_MAX_SKEW_US Kconfig option.
 */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC	CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#else
#define MAX_SKEW_USEC	(125 * WATCHDOG_INTERVAL / HZ)
#endif
/*
 * Default for maximum permissible skew when cs->uncertainty_margin is
 * not specified, and the lower bound even when cs->uncertainty_margin
 * is specified. This is also the default that is used when registering
 * clocks with unspecified cs->uncertainty_margin, so this macro is used
 * even in CONFIG_CLOCKSOURCE_WATCHDOG=n kernels.
 */
#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
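
/*
 * Worked example (illustrative; assumes the usual WATCHDOG_INTERVAL of
 * HZ >> 1, i.e. half a second): with HZ == 1000 the defaults become
 *
 *	MAX_SKEW_USEC     = 125 * 500 / 1000 = 62us per 0.5s interval (~125ppm)
 *	WATCHDOG_MAX_SKEW = 62 * NSEC_PER_USEC = 62000ns
 *
 * Each clocksource's uncertainty_margin is bounded below by
 * 2 * WATCHDOG_MAX_SKEW (~250ppm), and the skew check sums the margins
 * of the watchdog and of the clocksource under test, giving the ~500ppm
 * total mentioned above.
 */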
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered, clocksource_watchdog_kthread()
	 * will re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as
 * unstable; it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
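
/*
 * Illustrative call site (sketch, not taken from this file): the x86 TSC
 * code invokes this when it detects that the TSC has become unreliable,
 * roughly along the lines of:
 *
 *	void mark_tsc_unstable(char *reason)
 *	{
 *		...
 *		clocksource_mark_unstable(&clocksource_tsc);
 *	}
 *
 * The actual demotion and reselection then happen later in kthread
 * context, as described above.
 */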
		wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
		if (wd_delay <= md + cs->uncertainty_margin) {
			if (nretries > 1 && nretries >= max_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return WD_READ_SUCCESS;
		}

		/*
		 * Now compute the delay between the consecutive watchdog
		 * reads to see if there is too much external interference
		 * causing significant delay in reading both the clocksource
		 * and the watchdog.
		 *
		 * If the consecutive watchdog read-back delay exceeds md,
		 * report the system as busy, reinit the watchdog and skip
		 * the current watchdog test.
		 */
		wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
		if (wd_seq_delay > md)
			goto skip_test;
	}

	pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
		smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
	return WD_READ_UNSTABLE;

skip_test:
	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
		smp_processor_id(), watchdog->name, wd_seq_delay);
	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
		cs->name, wd_delay);
	return WD_READ_SKIP;
}
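
/*
 * Reading protocol sketch (illustrative): each attempt brackets the
 * clocksource read with watchdog reads, plus one extra watchdog read to
 * measure back-to-back watchdog latency:
 *
 *	wdnow   = watchdog->read();	// watchdog
 *	csnow   = cs->read();		// clocksource under test
 *	wd_end  = watchdog->read();	// wd_delay = wdnow..wd_end
 *	wd_end2 = watchdog->read();	// wd_seq_delay = wd_end..wd_end2
 *
 * A small wd_delay means csnow was sampled promptly and can be trusted;
 * a large wd_seq_delay means the whole system is being delayed (SMIs,
 * vCPU preemption), so the test is skipped rather than retried.
 */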
static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0 || n >= num_online_cpus()) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_any_but(cpu_online_mask, smp_processor_id());
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = cpumask_random(cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}
}
		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);

		if (read_ret == WD_READ_UNSTABLE) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/*
		 * When WD_READ_SKIP is returned, it means the system is likely
		 * under very heavy load, where the latency of reading the
		 * watchdog/clocksource is very large and affects the accuracy
		 * of the watchdog check. So give the system some space and
		 * suspend the watchdog check for 5 minutes.
		 */
		if (read_ret == WD_READ_SKIP) {
			/*
			 * As the watchdog timer will be suspended, and
			 * cs->last could keep unchanged for 5 minutes, reset
			 * the counters.
			 */
			clocksource_reset_watchdog();
			extra_wait = HZ * 300;
			break;
		}

		wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
		cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/*
		 * The processing of timer softirqs can get delayed (usually
		 * on account of ksoftirqd not getting to run in a timely
		 * manner), which causes the watchdog interval to stretch.
		 * Skew detection may fail for longer watchdog intervals
		 * on account of fixed margins being used.
		 * Some clocksources, e.g. acpi_pm, cannot tolerate
		 * watchdog intervals longer than a few seconds.
		 */
		interval = max(cs_nsec, wd_nsec);
		if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
			if (system_state > SYSTEM_SCHEDULING &&
			    interval > 2 * watchdog_max_interval) {
				watchdog_max_interval = interval;
				pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
					cs_nsec, wd_nsec);
			}
			watchdog_timer.expires = jiffies;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			s64 cs_wd_msec;
			s64 wd_msec;
			u32 wd_rem;

			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("	'%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
			pr_warn("	'%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, cs_nsec, csnow, cslast, cs->mask);
			cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
			wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
			pr_warn("	Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
				cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
			if (curr_clocksource == cs)
				pr_warn("	'%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn("	'%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			else
				pr_warn("	No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next_wrap(raw_smp_processor_id(), cpu_online_mask);

	/*
	 * Arm the timer if it is not already pending: this could race with a
	 * concurrent clocksource_stop_watchdog()/clocksource_start_watchdog()
	 * pair.
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}
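
/*
 * Worked example of the skew check above (illustrative numbers): with a
 * 0.5s watchdog interval and both cs->uncertainty_margin and
 * watchdog->uncertainty_margin at their default ~125us lower bound,
 * md is ~250us. If the clocksource under test advanced cs_nsec = 500.3ms
 * while the watchdog advanced wd_nsec = 500.0ms, the deviation is 300us,
 * which exceeds md, so the clocksource is marked unstable and the
 * pr_warn() report above is emitted.
 */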
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}
static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}
/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback:	if true, select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}
/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 * @start_cycles:	current cycles from timekeeping
 *
 * This function will save the start cycle values of the suspend timer to
 * calculate the suspend time when resuming the system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot CPUs and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid the same reading
	 * from the suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}
/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 * @cycle_now:	current cycles from timekeeping
 *
 * This function will calculate the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend
 * clocksource is available.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as the current cycle to
	 * avoid the same reading from the suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start)
		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);

	/*
	 * Disable the suspend timer to save power if the current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}
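
/*
 * Suspend timing flow (illustrative sketch): with a nonstop counter such
 * as a constant TSC selected as the suspend_clocksource, the timekeeping
 * core records the counter before the system sleeps and reads it again
 * afterwards:
 *
 *	clocksource_start_suspend_timing(cs, cycle_now);	// suspend path
 *	// ...system sleeps, the nonstop counter keeps running...
 *	nsec = clocksource_stop_suspend_timing(cs, cycle_now);	// resume path
 *
 * The returned nanoseconds are then injected into the timekeeper so that
 * clocks such as CLOCK_BOOTTIME account for the time spent suspended.
 */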
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);

	return (u32)ret;
}
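
/*
 * Worked example (illustrative, continuing the 19.2MHz case from above):
 * for mult == 873813333, the maximum NTP-style adjustment is
 *
 *	maxadj = 873813333 * 11 / 100 = 96119466
 *
 * i.e. the runtime code may vary mult by up to ~11% (110,000 ppm)
 * without further overflow checking.
 */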
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc:	maximum cycle value before potential overflow (does not include
 *		any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
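
/*
 * Worked example (illustrative, continuing the 19.2MHz case): with
 * mult == 873813333, shift == 24, maxadj == 96119466 and a 56-bit mask,
 *
 *	max_cycles = ULLONG_MAX / (mult + maxadj) ~= 1.9e10 cycles
 *	             (well below the 2^56 - 1 mask, so it is the limit)
 *	max_nsecs  = cyc2ns(1.9e10, mult - maxadj, 24) ~= 882 seconds
 *
 * and after the 50% safety margin the clocksource may defer updates for
 * at most ~441 seconds.
 */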
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:		Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);

	/*
	 * Threshold for detecting negative motion in clocksource_delta().
	 *
	 * Allow for 0.875 of the counter width so that overly long idle
	 * sleeps, which go slightly over mask/2, do not trigger the
	 * negative motion detection.
	 */
	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
}
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}
static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;

		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}
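
/*
 * Usage note (illustrative): the override consulted by
 * __clocksource_select() can be set on the kernel command line, e.g.
 * "clocksource=hpet", or at runtime via sysfs:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * Writing current_clocksource stores the name in override_name and
 * reruns the selection under clocksource_mutex.
 */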
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);
/*
 * Enqueue the clocksource sorted by rating.
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place where to insert. */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}
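
/*
 * Ordering example (illustrative ratings): after registering clocksources
 * rated 300 (tsc), 250 (hpet) and 200 (acpi_pm), clocksource_list holds
 * them best-first:
 *
 *	clocksource_list: tsc(300) -> hpet(250) -> acpi_pm(200)
 *
 * so clocksource_find_best() can simply take the first acceptable entry.
 */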
/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * If the uncertainty margin is not specified, calculate it. If
	 * both scale and freq are non-zero, calculate the clock period, but
	 * bound below at 2*WATCHDOG_MAX_SKEW, that is, 500ppm by default.
	 * However, if either of scale or freq is zero, be very conservative
	 * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
	 * for the uncertainty margin. Allow stupidly small uncertainty
	 * margins to be specified by the caller for testing purposes,
	 * but warn to discourage production use of this capability.
	 *
	 * Bottom line: The sum of the uncertainty margins of the
	 * watchdog clocksource and the clocksource under test will be at
	 * least 500ppm by default. For more information, please see the
	 * comment preceding CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US above.
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held.
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);
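
/*
 * Registration sketch (illustrative driver; all names are hypothetical):
 * a driver for a free-running 56-bit counter at 19.2MHz would typically
 * do something like
 *
 *	static u64 my_counter_read(struct clocksource *cs)
 *	{
 *		return readq(my_counter_base);
 *	}
 *
 *	static struct clocksource my_cs = {
 *		.name   = "my_counter",
 *		.rating = 300,
 *		.read   = my_counter_read,
 *		.mask   = CLOCKSOURCE_MASK(56),
 *		.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&my_cs, 19200000);
 *
 * clocksource_register_hz() is a wrapper around
 * __clocksource_register_scale(cs, 1, hz), so mult/shift and the
 * deferment limits are derived automatically as shown above.
 */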
#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with the current clocksource name
 *
 * Provides a sysfs interface for showing the current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = sysfs_emit(buf, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}
__setup("clock=", boot_override_clock);