/* FBNIC timing & PTP implementation
 * Datapath uses truncated 40b timestamps for scheduling and event reporting.
 * We need to promote those to full 64b, hence we periodically cache the top
 * 32bit of the HW time counter. Since this makes our time reporting non-atomic
 * we leave the HW clock free running and adjust time offsets in SW as needed.
 * Time offset is 64bit - we need a seq counter for 32bit machines.
 * Time offset and the cache of top bits are independent so we don't need
 * a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() + writer side lock
 * are enough.
 */
/* Period of refresh of top bits of timestamp, give ourselves a 8x margin.
 * This should translate to once a minute.
 *
 * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
 */
#define FBNIC_TS_HIGH_REFRESH_JIF nsecs_to_jiffies((1ULL << 40) / 16)
/* This function is "slow" because we could try guessing which high part * is correct based on low instead of re-reading, and skip reading @hi * twice altogether if @lo is far enough from 0.
*/ static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd)
{
u32 hi, lo;
lockdep_assert_held(&fbd->time_lock);
do {
hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));
spin_lock_irqsave(&fbd->time_lock, flags);
hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI); if (!fbnic_present(fbd)) goto out; /* Don't bother handling, reset is pending */ /* Let's keep high cached value a bit lower to avoid race with * incoming timestamps. The logic in fbnic_ts40_to_ns() will * take care of overflow in this case. It will make cached time * ~1 minute lower and incoming timestamp will always be later * then cached time.
*/
WRITE_ONCE(fbn->time_high, hi - 16);
fbd->last_read = jiffies;
out:
spin_unlock_irqrestore(&fbd->time_lock, flags);
}
/* NOTE(review): orphaned fragment — these statements read like the body of
 * a PHC frequency-adjust callback (adjfine-style); its signature, local
 * declarations, locking prologue, and the writes of @addend to the
 * FBNIC_PTP_ADD_VAL_* registers are missing from this chunk. Kept as-is;
 * recover the enclosing function before building.
 */
/* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm);
/* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */
fbnic_rd32(fbd, FBNIC_PTP_SPARE);
spin_unlock_irqrestore(&fbd->time_lock, flags);
/* NOTE(review): orphaned fragment — a consistent HI/LO counter read loop
 * bracketed by ptp_read_system_prets()/ptp_read_system_postts(), which is
 * the shape of a gettimex-style PHC callback body; its signature and the
 * code consuming @hi/@lo are missing from this chunk. Kept as-is.
 */
do {
	hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
	ptp_read_system_prets(sts);
	lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
	ptp_read_system_postts(sts);
	/* Similarly to comment above __fbnic_time_get_slow()
	 * - this can be optimized if needed.
	 */
} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));
/* Initialize the SW timekeeping sequence counter for @fbn. */
void fbnic_time_init(struct fbnic_net *fbn)
{
	/* This is not really a statistic, but the locking primitive fits
	 * our usecase perfectly, we need an atomic 8 bytes READ_ONCE() /
	 * WRITE_ONCE() behavior.
	 */
	u64_stats_init(&fbn->time_seq);
}
/* Prime the cached high bits of the time counter and kick off the
 * periodic refresh via the PTP aux worker.
 * Returns the result of ptp_schedule_worker().
 */
int fbnic_time_start(struct fbnic_net *fbn)
{
	fbnic_ptp_refresh_time(fbn->fbd, fbn);
	/* Assume that fbnic_ptp_do_aux_work() will never be called if not
	 * scheduled here
	 */
	return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF);
}
/* NOTE(review): the following German website-disclaimer text is extraction
 * contamination unrelated to this driver; fenced as a comment so the file
 * stays compilable. Translation: "The information on this website was
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax highlighting and the measurement
 * are still experimental."
 */