/**
 * struct ntp_data - Structure holding all NTP related state
 * @tick_usec:		USER_HZ period in microseconds
 * @tick_length:	Adjusted tick length
 * @tick_length_base:	Base value for @tick_length
 * @time_state:		State of the clock synchronization
 * @time_status:	Clock status bits
 * @time_offset:	Time adjustment in nanoseconds
 * @time_constant:	PLL time constant
 * @time_maxerror:	Maximum error in microseconds holding the NTP sync distance
 *			(NTP dispersion + delay / 2)
 * @time_esterror:	Estimated error in microseconds holding NTP dispersion
 * @time_freq:		Frequency offset scaled nsecs/secs
 * @time_reftime:	Time at last adjustment in seconds
 * @time_adjust:	Adjustment value
 * @ntp_tick_adj:	Constant boot-param configurable NTP tick adjustment (upscaled)
 * @ntp_next_leap_sec:	Second value of the next pending leapsecond, or TIME64_MAX if no leap
 *
 * @pps_valid:		PPS signal watchdog counter
 * @pps_tf:		PPS phase median filter
 * @pps_jitter:		PPS current jitter in nanoseconds
 * @pps_fbase:		PPS beginning of the last freq interval
 * @pps_shift:		PPS current interval duration in seconds (shift value)
 * @pps_intcnt:		PPS interval counter
 * @pps_freq:		PPS frequency offset in scaled ns/s
 * @pps_stabil:		PPS current stability in scaled ns/s
 * @pps_calcnt:		PPS monitor: calibration intervals
 * @pps_jitcnt:		PPS monitor: jitter limit exceeded
 * @pps_stbcnt:		PPS monitor: stability limit exceeded
 * @pps_errcnt:		PPS monitor: calibration errors
 *
 * Protected by the timekeeping locks.
 */
struct ntp_data {
	unsigned long		tick_usec;
	u64			tick_length;
	u64			tick_length_base;
	int			time_state;
	int			time_status;
	s64			time_offset;
	long			time_constant;
	long			time_maxerror;
	long			time_esterror;
	s64			time_freq;
	time64_t		time_reftime;
	long			time_adjust;
	s64			ntp_tick_adj;
	time64_t		ntp_next_leap_sec;
#ifdef CONFIG_NTP_PPS
	int			pps_valid;
	long			pps_tf[3];
	long			pps_jitter;
	struct timespec64	pps_fbase;
	int			pps_shift;
	int			pps_intcnt;
	s64			pps_freq;
	long			pps_stabil;
	long			pps_calcnt;
	long			pps_jitcnt;
	long			pps_stbcnt;
	long			pps_errcnt;
#endif
};
/*
 * The following constants are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID	10	/* PPS signal watchdog max (s) */
#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
				   increase pps_shift or consecutive bad
				   intervals to decrease it */
#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */
/* * PPS kernel consumer compensates the whole phase error immediately. * Otherwise, reduce the offset by a fixed factor times the time constant.
*/ staticinline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{ if (ntpdata->time_status & STA_PPSTIME && ntpdata->time_status & STA_PPSSIGNAL) return offset; else return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}
/* Restart the frequency calibration at the shortest interval length */
static inline void pps_reset_freq_interval(struct ntp_data *ntpdata)
{
	/* The PPS calibration interval may end surprisingly early */
	ntpdata->pps_intcnt = 0;
	ntpdata->pps_shift = PPS_INTMIN;
}
/**
 * pps_clear - Clears the PPS state variables
 * @ntpdata:	Pointer to ntp data
 */
static inline void pps_clear(struct ntp_data *ntpdata)
{
	int tap;

	pps_reset_freq_interval(ntpdata);

	/* Flush the phase median filter */
	for (tap = 0; tap < 3; tap++)
		ntpdata->pps_tf[tap] = 0;

	/* Forget the frequency interval base and the frequency offset */
	ntpdata->pps_fbase.tv_sec = 0;
	ntpdata->pps_fbase.tv_nsec = 0;
	ntpdata->pps_freq = 0;
}
/*
 * Decrease pps_valid to indicate that another second has passed since the
 * last PPS signal. When it reaches 0, indicate that PPS signal is missing.
 */
static inline void pps_dec_valid(struct ntp_data *ntpdata)
{
	if (ntpdata->pps_valid > 0) {
		ntpdata->pps_valid--;
		return;
	}

	/* Watchdog expired: signal is gone, drop all PPS status bits */
	ntpdata->time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				  STA_PPSWANDER | STA_PPSERROR);
	pps_clear(ntpdata);
}
/* * Don't wait for the next second_overflow, apply the change to the * tick length immediately:
*/
ntpdata->tick_length += new_base - ntpdata->tick_length_base;
ntpdata->tick_length_base = new_base;
}
staticvoid ntp_update_offset(struct ntp_data *ntpdata, long offset)
{
s64 freq_adj, offset64; long secs, real_secs;
if (!(ntpdata->time_status & STA_PLL)) return;
if (!(ntpdata->time_status & STA_NANO)) { /* Make sure the multiplication below won't overflow */
offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
offset *= NSEC_PER_USEC;
}
/* Scale the phase adjustment and clamp to the operating range. */
offset = clamp(offset, -MAXPHASE, MAXPHASE);
/* * Select how the frequency is to be controlled * and in which mode (PLL or FLL).
*/
real_secs = ktime_get_ntp_seconds(ntpdata - tk_ntp_data);
secs = (long)(real_secs - ntpdata->time_reftime); if (unlikely(ntpdata->time_status & STA_FREQHOLD))
secs = 0;
/** * ntp_clear - Clears the NTP state variables * @tkid: Timekeeper ID to be able to select proper ntp data array member
*/ void ntp_clear(unsignedint tkid)
{
__ntp_clear(&tk_ntp_data[tkid]);
}
/** * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t * @tkid: Timekeeper ID * * Returns: For @tkid == TIMEKEEPER_CORE this provides the time of the next * leap second against CLOCK_REALTIME in a ktime_t format if a * leap second is pending. KTIME_MAX otherwise.
*/
ktime_t ntp_get_next_leap(unsignedint tkid)
{ struct ntp_data *ntpdata = &tk_ntp_data[TIMEKEEPER_CORE];
/* * This routine handles the overflow of the microsecond field * * The tricky bits of code to handle the accurate clock support * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. * They were originally developed for SUN and DEC kernels. * All the kudos should go to Dave for this stuff. * * Also handles leap second processing, and returns leap offset
*/ int second_overflow(unsignedint tkid, time64_t secs)
{ struct ntp_data *ntpdata = &tk_ntp_data[tkid];
s64 delta; int leap = 0;
s32 rem;
/* * Leap second processing. If in leap-insert state at the end of the * day, the system clock is set back one second; if in leap-delete * state, the system clock is set ahead one second.
*/ switch (ntpdata->time_state) { case TIME_OK: if (ntpdata->time_status & STA_INS) {
ntpdata->time_state = TIME_INS;
div_s64_rem(secs, SECS_PER_DAY, &rem);
ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
} elseif (ntpdata->time_status & STA_DEL) {
ntpdata->time_state = TIME_DEL;
div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
} break; case TIME_INS: if (!(ntpdata->time_status & STA_INS)) {
ntpdata->ntp_next_leap_sec = TIME64_MAX;
ntpdata->time_state = TIME_OK;
} elseif (secs == ntpdata->ntp_next_leap_sec) {
leap = -1;
ntpdata->time_state = TIME_OOP;
pr_notice("Clock: inserting leap second 23:59:60 UTC\n");
} break; case TIME_DEL: if (!(ntpdata->time_status & STA_DEL)) {
ntpdata->ntp_next_leap_sec = TIME64_MAX;
ntpdata->time_state = TIME_OK;
} elseif (secs == ntpdata->ntp_next_leap_sec) {
leap = 1;
ntpdata->ntp_next_leap_sec = TIME64_MAX;
ntpdata->time_state = TIME_WAIT;
pr_notice("Clock: deleting leap second 23:59:59 UTC\n");
} break; case TIME_OOP:
ntpdata->ntp_next_leap_sec = TIME64_MAX;
ntpdata->time_state = TIME_WAIT; break; case TIME_WAIT: if (!(ntpdata->time_status & (STA_INS | STA_DEL)))
ntpdata->time_state = TIME_OK; break;
}
/* Bump the maxerror field */
ntpdata->time_maxerror += MAXFREQ / NSEC_PER_USEC; if (ntpdata->time_maxerror > NTP_PHASE_LIMIT) {
ntpdata->time_maxerror = NTP_PHASE_LIMIT;
ntpdata->time_status |= STA_UNSYNC;
}
/* Compute the phase adjustment for the next second */
ntpdata->tick_length = ntpdata->tick_length_base;
/* * Check whether @now is correct versus the required time to update the RTC * and calculate the value which needs to be written to the RTC so that the * next seconds increment of the RTC after the write is aligned with the next * seconds increment of clock REALTIME. * * tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds * * t2.tv_nsec == 0 * tsched = t2 - set_offset_nsec * newval = t2 - NSEC_PER_SEC * * ==> neval = tsched + set_offset_nsec - NSEC_PER_SEC * * As the execution of this code is not guaranteed to happen exactly at * tsched this allows it to happen within a fuzzy region: * * abs(now - tsched) < FUZZ * * If @now is not inside the allowed window the function returns false.
*/ staticinlinebool rtc_tv_nsec_ok(unsignedlong set_offset_nsec, struct timespec64 *to_set, conststruct timespec64 *now)
{ /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */ constunsignedlong TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; struct timespec64 delay = {.tv_sec = -1,
.tv_nsec = set_offset_nsec};
*to_set = timespec64_add(*now, delay);
if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
to_set->tv_nsec = 0; returntrue;
}
#ifdef CONFIG_RTC_SYSTOHC /* Save NTP synchronized time to the RTC */ staticint update_rtc(struct timespec64 *to_set, unsignedlong *offset_nsec)
{ struct rtc_device *rtc; struct rtc_time tm; int err = -ENODEV;
rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE); if (!rtc) return -ENODEV;
if (!rtc->ops || !rtc->ops->set_time) goto out_close;
/* First call might not have the correct offset */ if (*offset_nsec == rtc->set_offset_nsec) {
rtc_time64_to_tm(to_set->tv_sec, &tm);
err = rtc_set_time(rtc, &tm);
} else { /* Store the update offset and let the caller try again */
*offset_nsec = rtc->set_offset_nsec;
err = -EAGAIN;
}
out_close:
rtc_class_close(rtc); return err;
} #else staticinlineint update_rtc(struct timespec64 *to_set, unsignedlong *offset_nsec)
{ return -ENODEV;
} #endif
/** * ntp_synced - Tells whether the NTP status is not UNSYNC * Returns: true if not UNSYNC, false otherwise
*/ staticinlinebool ntp_synced(void)
{ return !(tk_ntp_data[TIMEKEEPER_CORE].time_status & STA_UNSYNC);
}
/*
 * If we have an externally synchronized Linux clock, then update RTC clock
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges to call to the RTC
 * set at the correct moment to phase synchronize the RTC second tick over
 * with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
	/*
	 * The default synchronization offset is 500ms for the deprecated
	 * update_persistent_clock64() under the assumption that it uses
	 * the infamous CMOS clock (MC146818).
	 */
	static unsigned long offset_nsec = NSEC_PER_SEC / 2;
	struct timespec64 now, to_set;
	int res = -EAGAIN;

	/*
	 * Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer()
	 * managed to schedule the work between the timer firing and the
	 * work being able to rearm the timer. Wait for the timer to expire.
	 */
	if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
		return;

	ktime_get_real_ts64(&now);
	/* If @now is not in the allowed window, try again */
	if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
		goto rearm;

	/* Take timezone adjusted RTCs into account */
	if (persistent_clock_is_local)
		to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

	/* Try the legacy RTC first */
	res = update_persistent_clock64(to_set);
	if (res != -ENODEV)
		goto rearm;

	/* Try the RTC class */
	res = update_rtc(&to_set, &offset_nsec);
	if (res == -ENODEV)
		return;
rearm:
	sched_sync_hw_clock(offset_nsec, res != 0);
}
void ntp_notify_cmos_timer(bool offset_set)
{
	/*
	 * If the time jumped (using ADJ_SETOFFSET) cancels sync timer,
	 * which may have been running if the time was synchronized
	 * prior to the ADJ_SETOFFSET call.
	 */
	if (offset_set)
		hrtimer_cancel(&sync_hrtimer);

	/*
	 * When the work is currently executed but has not yet the timer
	 * rearmed this queues the work immediately again. No big issue,
	 * just a pointless work scheduled.
	 */
	if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
		queue_work(system_freezable_power_efficient_wq, &sync_work);
}
/* * Propagate a new txc->status value into the NTP state:
*/ staticinlinevoid process_adj_status(struct ntp_data *ntpdata, conststruct __kernel_timex *txc)
{ if ((ntpdata->time_status & STA_PLL) && !(txc->status & STA_PLL)) {
ntpdata->time_state = TIME_OK;
ntpdata->time_status = STA_UNSYNC;
ntpdata->ntp_next_leap_sec = TIME64_MAX; /* Restart PPS frequency calibration */
pps_reset_freq_interval(ntpdata);
}
/* * If we turn on PLL adjustments then reset the * reference time to current time.
*/ if (!(ntpdata->time_status & STA_PLL) && (txc->status & STA_PLL))
ntpdata->time_reftime = ktime_get_ntp_seconds(ntpdata - tk_ntp_data);
/* only set allowed bits */
ntpdata->time_status &= STA_RONLY;
ntpdata->time_status |= txc->status & ~STA_RONLY;
}
/* Handle leapsec adjustments */ if (unlikely(ts->tv_sec >= ntpdata->ntp_next_leap_sec)) { if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS)) {
result = TIME_OOP;
txc->tai++;
txc->time.tv_sec--;
} if ((ntpdata->time_state == TIME_DEL) && (ntpdata->time_status & STA_DEL)) {
result = TIME_WAIT;
txc->tai--;
txc->time.tv_sec++;
} if ((ntpdata->time_state == TIME_OOP) && (ts->tv_sec == ntpdata->ntp_next_leap_sec))
result = TIME_WAIT;
}
return result;
}
#ifdef CONFIG_NTP_PPS
/*
 * struct pps_normtime is basically a struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC)
 */
struct pps_normtime {
	s64	sec;	/* seconds */
	long	nsec;	/* nanoseconds */
};
/* * Normalize the timestamp so that nsec is in the * [ -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval
*/ staticinlinestruct pps_normtime pps_normalize_ts(struct timespec64 ts)
{ struct pps_normtime norm = {
.sec = ts.tv_sec,
.nsec = ts.tv_nsec
};
/* Get current phase correction and jitter */
static inline long pps_phase_filter_get(struct ntp_data *ntpdata, long *jitter)
{
	long diff = ntpdata->pps_tf[0] - ntpdata->pps_tf[1];

	/* Jitter is the absolute difference of the two newest samples */
	*jitter = diff < 0 ? -diff : diff;

	/* TODO: test various filters */
	return ntpdata->pps_tf[0];
}
/* Add the sample to the phase filter */
static inline void pps_phase_filter_add(struct ntp_data *ntpdata, long err)
{
	/* Shift the three-tap filter, newest sample in slot 0 */
	ntpdata->pps_tf[2] = ntpdata->pps_tf[1];
	ntpdata->pps_tf[1] = ntpdata->pps_tf[0];
	ntpdata->pps_tf[0] = err;
}
/*
 * Decrease frequency calibration interval length. It is halved after four
 * consecutive unstable intervals.
 */
static inline void pps_dec_freq_interval(struct ntp_data *ntpdata)
{
	ntpdata->pps_intcnt--;
	if (ntpdata->pps_intcnt > -PPS_INTCOUNT)
		return;

	ntpdata->pps_intcnt = -PPS_INTCOUNT;
	if (ntpdata->pps_shift > PPS_INTMIN) {
		ntpdata->pps_shift--;
		ntpdata->pps_intcnt = 0;
	}
}
/*
 * Increase frequency calibration interval length. It is doubled after
 * four consecutive stable intervals.
 */
static inline void pps_inc_freq_interval(struct ntp_data *ntpdata)
{
	ntpdata->pps_intcnt++;
	if (ntpdata->pps_intcnt < PPS_INTCOUNT)
		return;

	ntpdata->pps_intcnt = PPS_INTCOUNT;
	if (ntpdata->pps_shift < PPS_INTMAX) {
		ntpdata->pps_shift++;
		ntpdata->pps_intcnt = 0;
	}
}
/* * Update clock frequency based on MONOTONIC_RAW clock PPS signal * timestamps * * At the end of the calibration interval the difference between the * first and last MONOTONIC_RAW clock timestamps divided by the length * of the interval becomes the frequency update. If the interval was * too long, the data are discarded. * Returns the difference between old and new frequency values.
*/ staticlong hardpps_update_freq(struct ntp_data *ntpdata, struct pps_normtime freq_norm)
{ long delta, delta_mod;
s64 ftemp;
/* Check if the frequency interval was too long */ if (freq_norm.sec > (2 << ntpdata->pps_shift)) {
ntpdata->time_status |= STA_PPSERROR;
ntpdata->pps_errcnt++;
pps_dec_freq_interval(ntpdata);
printk_deferred(KERN_ERR "hardpps: PPSERROR: interval too long - %lld s\n",
freq_norm.sec); return 0;
}
/* * Here the raw frequency offset and wander (stability) is * calculated. If the wander is less than the wander threshold the * interval is increased; otherwise it is decreased.
*/
ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
freq_norm.sec);
delta = shift_right(ftemp - ntpdata->pps_freq, NTP_SCALE_SHIFT);
ntpdata->pps_freq = ftemp; if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
printk_deferred(KERN_WARNING "hardpps: PPSWANDER: change=%ld\n", delta);
ntpdata->time_status |= STA_PPSWANDER;
ntpdata->pps_stbcnt++;
pps_dec_freq_interval(ntpdata);
} else { /* Good sample */
pps_inc_freq_interval(ntpdata);
}
/* * The stability metric is calculated as the average of recent * frequency changes, but is used only for performance monitoring
*/
delta_mod = delta; if (delta_mod < 0)
delta_mod = -delta_mod;
ntpdata->pps_stabil += (div_s64(((s64)delta_mod) << (NTP_SCALE_SHIFT - SHIFT_USEC),
NSEC_PER_USEC) - ntpdata->pps_stabil) >> PPS_INTMIN;
/* If enabled, the system clock frequency is updated */ if ((ntpdata->time_status & STA_PPSFREQ) && !(ntpdata->time_status & STA_FREQHOLD)) {
ntpdata->time_freq = ntpdata->pps_freq;
ntp_update_frequency(ntpdata);
}
return delta;
}
/* Correct REALTIME clock phase error against PPS signal */ staticvoid hardpps_update_phase(struct ntp_data *ntpdata, long error)
{ long correction = -error; long jitter;
/* Add the sample to the median filter */
pps_phase_filter_add(ntpdata, correction);
correction = pps_phase_filter_get(ntpdata, &jitter);
/* * Nominal jitter is due to PPS signal noise. If it exceeds the * threshold, the sample is discarded; otherwise, if so enabled, * the time offset is updated.
*/ if (jitter > (ntpdata->pps_jitter << PPS_POPCORN)) {
printk_deferred(KERN_WARNING "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
jitter, (ntpdata->pps_jitter << PPS_POPCORN));
ntpdata->time_status |= STA_PPSJITTER;
ntpdata->pps_jitcnt++;
} elseif (ntpdata->time_status & STA_PPSTIME) { /* Correct the time using the phase offset */
ntpdata->time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
NTP_INTERVAL_FREQ); /* Cancel running adjtime() */
ntpdata->time_adjust = 0;
} /* Update jitter */
ntpdata->pps_jitter += (jitter - ntpdata->pps_jitter) >> PPS_INTMIN;
}
/* * __hardpps() - discipline CPU clock oscillator to external PPS signal * * This routine is called at each PPS signal arrival in order to * discipline the CPU clock oscillator to the PPS signal. It takes two * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former * is used to correct clock phase error and the latter is used to * correct the frequency. * * This code is based on David Mills's reference nanokernel * implementation. It was mostly rewritten but keeps the same idea.
*/ void __hardpps(conststruct timespec64 *phase_ts, conststruct timespec64 *raw_ts)
{ struct ntp_data *ntpdata = &tk_ntp_data[TIMEKEEPER_CORE]; struct pps_normtime pts_norm, freq_norm;
pts_norm = pps_normalize_ts(*phase_ts);
/* Clear the error bits, they will be set again if needed */
ntpdata->time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
/* * When called for the first time, just start the frequency * interval
*/ if (unlikely(ntpdata->pps_fbase.tv_sec == 0)) {
ntpdata->pps_fbase = *raw_ts; return;
}
/* Ok, now we have a base for frequency calculation */
freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, ntpdata->pps_fbase));
/* * Check that the signal is in the range * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it
*/ if ((freq_norm.sec == 0) || (freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
ntpdata->time_status |= STA_PPSJITTER; /* Restart the frequency calibration interval */
ntpdata->pps_fbase = *raw_ts;
printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n"); return;
}
/* Signal is ok. Check if the current frequency interval is finished */ if (freq_norm.sec >= (1 << ntpdata->pps_shift)) {
ntpdata->pps_calcnt++; /* Restart the frequency calibration interval */
ntpdata->pps_fbase = *raw_ts;
hardpps_update_freq(ntpdata, freq_norm);
}
hardpps_update_phase(ntpdata, pts_norm.nsec);
} #endif/* CONFIG_NTP_PPS */
staticint __init ntp_tick_adj_setup(char *str)
{ int rc = kstrtos64(str, 0, &tk_ntp_data[TIMEKEEPER_CORE].ntp_tick_adj); if (rc) return rc;
/*
 * NOTE(review): The following German website disclaimer is unrelated
 * boilerplate that appears to have been appended to this file by a text
 * extractor — it is not part of the kernel source. Preserved here inside
 * a comment so it cannot break the build; remove once confirmed:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 *  noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 *  zugesichert.
 *  Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 *  experimentell."
 */