/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};
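/*
 * A hedged way to encode the cache-line expectation above (illustration
 * only; this assertion is an assumption, not part of the original file):
 *
 *	static_assert(offsetofend(struct clock_data, read_data[0]) <= 64);
 *
 * On a 64-bit build without debug options, struct clock_read_data is
 * 40 bytes (three u64s, a function pointer and two u32s), so 'seq' plus
 * 'read_data[0]' end at byte 48, within one 64-byte line.
 */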
static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG bits.
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}
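/*
 * Illustration (assumption, not part of the original file): registering
 * with BITS_PER_LONG means the reader masks the cycle delta with
 * CLOCKSOURCE_MASK(BITS_PER_LONG); on a 32-bit arch that mask is
 * 0xffffffff, so any upper bits a get_jiffies_64() read would provide
 * are discarded anyway. A racy 32-bit read of jiffies is therefore fine.
 */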
unsigned long long notrace sched_clock(void)
{
	unsigned long long ns;

	preempt_disable_notrace();

	/*
	 * All of __sched_clock() is a seqcount_latch reader critical section,
	 * but relies on the raw helpers which are uninstrumented. For KCSAN,
	 * mark all accesses in __sched_clock() as atomic.
	 */
	kcsan_nestable_atomic_begin();
	ns = __sched_clock();
	kcsan_nestable_atomic_end();

	preempt_enable_notrace();

	return ns;
}
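/*
 * For reference, a sketch of the reader side; __sched_clock() itself is
 * not part of this excerpt, so the loop below is an approximation built
 * from the latch scheme described above:
 *
 *	do {
 *		seq = read_seqcount_latch(&cd.seq);
 *		rd  = cd.read_data + (seq & 1);
 *
 *		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 *		      rd->sched_clock_mask;
 *		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
 *	} while (read_seqcount_latch_retry(&cd.seq, seq));
 */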
/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* steer readers towards the odd copy */
	write_seqcount_latch_begin(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	write_seqcount_latch(&cd.seq);

	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	write_seqcount_latch_end(&cd.seq);
}
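/*
 * Illustration (assumption, not part of the original file): an NMI that
 * interrupts the update always reads from the copy that is not being
 * written, because each write_seqcount_latch*() step flips the low bit
 * of cd.seq:
 *
 *	updater				NMI reader
 *	-------				----------
 *	seq++ (now odd)
 *	read_data[0] = *rd		seq odd  -> reads read_data[1]
 *	seq++ (now even)
 *	read_data[1] = *rd		seq even -> reads read_data[0]
 */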
void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask,
				     rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);
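/*
 * Note (illustration; cyc_to_ns() is not shown in this excerpt): the
 * conversion is plain fixed-point math, essentially:
 *
 *	static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 *	{
 *		return (cyc * mult) >> shift;
 *	}
 *
 * e.g. for a 24 MHz counter a mult/shift pair around 699050667/24 maps
 * one tick to (1 * 699050667) >> 24 ~= 41 ns.
 */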
	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}
	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}
	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);
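/*
 * Illustration (an assumed example, not taken from this file): for a
 * 56-bit counter running at 24 MHz the branch above yields r = 24 and
 * r_unit = 'M', producing a boot message along the lines of:
 *
 *	sched_clock: 56 bits at 24MHz, resolution 41ns, wraps every 4398046511097ns
 */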
	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}
void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make the jiffies-based clock the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_setup(&sched_clock_timer, sched_clock_poll, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_HARD);
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}
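/*
 * For reference, a sketch of the poll callback (sched_clock_poll() is not
 * part of this excerpt; this is an approximation): it refreshes the epoch
 * before the counter can wrap and then re-arms itself:
 *
 *	static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 *	{
 *		update_sched_clock();
 *		hrtimer_forward_now(hrt, cd.wrap_kt);
 *		return HRTIMER_RESTART;
 *	}
 */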
/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}
int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}