/* The oneshot mode has very high deviation, don't use it! */
.set_state_shutdown = mfgpt_timer_shutdown,
.set_state_periodic = mfgpt_timer_set_periodic,
.irq = CS5536_MFGPT_INTR,
};
/*
 * Get the MFGPT base address.
 *
 * NOTE: do not remove this; it is needed because the value of
 * mfgpt_base is variable.
 */
_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
/*
 * Initialize the conversion factor and the min/max deltas of the clock event
 * structure and register the clock event source with the framework.
 */
void __init setup_mfgpt0_timer(void)
{
	u32 basehi;
	struct clock_event_device *cd = &mfgpt_clockevent;
	unsigned int cpu = smp_processor_id();

	/* bind the clock event device to this (boot) CPU before registering */
	cd->cpumask = cpumask_of(cpu);

	/* get MFGPT base address */
	_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);

	clockevents_register_device(cd);

	if (request_irq(CS5536_MFGPT_INTR, timer_interrupt,
			IRQF_NOBALANCING | IRQF_TIMER, "timer", NULL))
		pr_err("Failed to register timer interrupt\n");
}
/*
 * Since the MFGPT overflows every tick, it's not very useful
 * to just read by itself. So use jiffies to emulate a free
 * running counter:
 */
static u64 mfgpt_read(struct clocksource *cs)
{
	unsigned long flags;
	int count;
	u32 jifs;
	static int old_count;
	static u32 old_jifs;

	raw_spin_lock_irqsave(&mfgpt_lock, flags);
	/*
	 * Although our caller may have the read side of xtime_lock,
	 * this is now a seqlock, and we are cheating in this routine
	 * by having side effects on state that we cannot undo if
	 * there is a collision on the seqlock and our caller has to
	 * retry. (Namely, old_jifs and old_count.) So we must treat
	 * jiffies as volatile despite the lock. We read jiffies
	 * before latching the timer count to guarantee that although
	 * the jiffies value might be older than the count (that is,
	 * the counter may underflow between the last point where
	 * jiffies was incremented and the point where we latch the
	 * count), it cannot be newer.
	 */
	jifs = jiffies;
	/* read the count */
	count = inw(MFGPT0_CNT);

	/*
	 * It's possible for count to appear to go the wrong way for this
	 * reason:
	 *
	 * The timer counter underflows, but we haven't handled the resulting
	 * interrupt and incremented jiffies yet.
	 *
	 * Previous attempts to handle these cases intelligently were buggy, so
	 * we just do the simple thing now.
	 */
	if (count < old_count && jifs == old_jifs)
		count = old_count;

	old_count = count;
	old_jifs = jifs;

	raw_spin_unlock_irqrestore(&mfgpt_lock, flags);

	return (u64) (jifs * COMPARE) + count;
}
staticstruct clocksource clocksource_mfgpt = {
.name = "mfgpt",
.rating = 120, /* Functional for real use, but not desired */
.read = mfgpt_read,
.mask = CLOCKSOURCE_MASK(32),
};
int __init init_mfgpt_clocksource(void)
{ if (num_possible_cpus() > 1) /* MFGPT does not scale! */ return 0;
/*
 * NOTE(review): extraneous text below — a scraped website disclaimer
 * (German), not part of this source file. Translation: "The information
 * on this website has been carefully compiled to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of
 * the information provided is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */