/* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves, it doesn't deserve * a generic callback i think.
*/ void ack_bad_irq(unsignedint irq)
{
atomic_inc(&irq_err_count);
printk("unexpected IRQ trap at vector %02x\n", irq);
}
/*
 * NOTE(review): extraction damage in this region — "#ifdefined" should read
 * "#if defined", and the body that follows the arch_show_interrupts() header
 * does not belong to it: it looks like the interior of the hardirq-stack
 * switch plus the tail of the arch interrupt dispatcher from the same file,
 * with the intervening lines lost. Left byte-identical; restore this span
 * from the upstream source rather than patching the fragment in place.
 */
#ifdefined(CONFIG_PROC_FS) /* * /proc/interrupts printing for arch specific interrupts
*/ int arch_show_interrupts(struct seq_file *p, int prec)
{ int j;
/* * this is where we switch to the IRQ stack. However, if we are * already using the IRQ stack (because we interrupted a hardirq * handler) we can't do that and just have to keep using the * current stack (which is the irq stack already after all)
*/ if (curctx != irqctx) {
u32 *isp;
/* * Copy the softirq bits in preempt_count so that the * softirq checks work in the hardirq context.
*/
irqctx->tinfo.preempt_count =
(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
/*
 * NOTE(review): from here on this reads like the tail of the arch IRQ
 * entry path (dispatch one IRQ if it is not ignored, then unwind) —
 * presumably do_IRQ(); the enclosing function header is missing from
 * this chunk. TODO confirm against the upstream file.
 */
if (irq != NO_IRQ_IGNORE) {
handle_one_irq(irq);
irq_finish(irq);
}
irq_exit();
set_irq_regs(old_regs);
return IRQ_HANDLED;
}
/*
 * Arch IRQ bootstrap: platform setup first, then the optional
 * machine-vector hook, controller finalization, and finally the
 * boot CPU's IRQ stack context.
 */
void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Give the machine vector a chance to do its own IRQ setup. */
	if (sh_mv.mv_init_irq != NULL) {
		sh_mv.mv_init_irq();
	}

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU /* * The CPU has been marked offline. Migrate IRQs off this CPU. If * the affinity settings do not allow other CPUs, force them onto any * available CPU.
*/ void migrate_irqs(void)
/*
 * NOTE(review): the body of migrate_irqs() is truncated here — the chunk
 * ends mid-function and unrelated web-page text follows. "unsignedint" is
 * a pasting artifact for "unsigned int". Restore this function from the
 * upstream source rather than patching the fragment.
 */
{ unsignedint irq, cpu = smp_processor_id();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.