/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:	Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}
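/*
 * Illustrative sketch, not part of this file: the CPU hotplug migration
 * path is the expected caller. A fragment modelled on migrate_one_irq()
 * in kernel/irq/cpuhotplug.c (the "affinity" local and the surrounding
 * logic are assumptions here) could look like:
 *
 *	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
 *
 *	if (irq_fixup_move_pending(desc, true))
 *		affinity = irq_desc_get_pending_mask(desc);
 *
 * I.e. when a move to a still-online CPU is pending, the pending mask
 * supersedes the current affinity mask as the migration target.
 */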
void irq_force_complete_move(struct irq_desc *desc)
{
	for (struct irq_data *d = irq_desc_get_irq_data(desc); d;
	     d = irqd_get_parent_data(d)) {
		if (d->chip && d->chip->irq_force_complete_move) {
			d->chip->irq_force_complete_move(d);
			return;
		}
	}
}
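/*
 * Illustrative sketch, not from this file: an irqchip that supports
 * forced move completion provides the callback in its struct irq_chip.
 * The chip name and handler below are hypothetical:
 *
 *	static void my_force_complete_move(struct irq_data *d) { ... }
 *
 *	static struct irq_chip my_vector_chip = {
 *		.name			  = "MY-VECTOR",
 *		.irq_force_complete_move = my_force_complete_move,
 *	};
 *
 * The loop above walks the irq_data hierarchy from the top level towards
 * the root and invokes the first such callback it finds.
 */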
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);
	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}
	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);
	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level triggered interrupts, but in an edge
	 * triggered case, we might be setting the RTE while an active
	 * trigger is coming in. This could cause some IO-APICs to
	 * malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}
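/*
 * Context note (a sketch of the surrounding flow, not code from this
 * file): with CONFIG_GENERIC_PENDING_IRQ, an affinity change requested
 * while the interrupt cannot be moved in process context is recorded
 * roughly like this in the set-affinity path:
 *
 *	cpumask_copy(desc->pending_mask, new_mask);
 *	irqd_set_move_pending(data);
 *
 * irq_move_masked_irq() above then performs the actual re-programming
 * on the next interrupt, with the line masked by the caller.
 */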
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled, and it should be optimized away when
	 * CONFIG_IRQ_DOMAIN_HIERARCHY is disabled. So we avoid an
	 * "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
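/*
 * Usage note: callers normally go through the irq_move_irq() inline
 * wrapper (declared in <linux/irq.h> when CONFIG_GENERIC_PENDING_IRQ is
 * set), which only drops into this slow path when a move is actually
 * pending, roughly:
 *
 *	if (unlikely(irqd_is_setaffinity_pending(data)))
 *		__irq_move_irq(data);
 */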
bool irq_can_move_in_process_context(struct irq_data *data)
{
	/*
	 * Get the top level irq_data in the hierarchy, which is optimized
	 * away when CONFIG_IRQ_DOMAIN_HIERARCHY is disabled.
	 */
	data = irq_desc_get_irq_data(irq_data_to_desc(data));
	return irq_can_move_pcntxt(data);
}
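/*
 * Illustrative note: irq_can_move_pcntxt() is an internal helper (see
 * kernel/irq/internals.h) which reports whether the affinity of this
 * interrupt may be changed directly from process context, or whether
 * the update must be deferred to the next interrupt via the pending
 * machinery above. Its exact implementation depends on the kernel
 * version and the irqchip's capabilities.
 */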