/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	apic_eoi();
}
#define irq_stats(x) (&per_cpu(irq_stat, x)) /* * /proc/interrupts printing for arch specific interrupts
*/ int arch_show_interrupts(struct seq_file *p, int prec)
{ int j;
#ifdef CONFIG_X86_LOCAL_APIC
sum += irq_stats(cpu)->apic_timer_irqs;
sum += irq_stats(cpu)->irq_spurious_count;
sum += irq_stats(cpu)->apic_perf_irqs;
sum += irq_stats(cpu)->apic_irq_work_irqs;
sum += irq_stats(cpu)->icr_read_retry_count; if (x86_platform_ipi_callback)
sum += irq_stats(cpu)->x86_platform_ipis; #endif #ifdef CONFIG_SMP
sum += irq_stats(cpu)->irq_resched_count;
sum += irq_stats(cpu)->irq_call_count; #endif #ifdef CONFIG_X86_THERMAL_VECTOR
sum += irq_stats(cpu)->irq_thermal_count; #endif #ifdef CONFIG_X86_MCE_THRESHOLD
sum += irq_stats(cpu)->irq_threshold_count; #endif #ifdef CONFIG_X86_HV_CALLBACK_VECTOR
sum += irq_stats(cpu)->irq_hv_callback_count; #endif #if IS_ENABLED(CONFIG_HYPERV)
sum += irq_stats(cpu)->irq_hv_reenlightenment_count;
sum += irq_stats(cpu)->hyperv_stimer0_count; #endif #ifdef CONFIG_X86_MCE
sum += per_cpu(mce_exception_count, cpu);
sum += per_cpu(mce_poll_count, cpu); #endif return sum;
}
/*
 * Architecture-wide (non-per-CPU) interrupt count for /proc/stat:
 * currently just the accumulated APIC error count.
 */
u64 arch_irq_stat(void)
{
	return atomic_read(&irq_err_count);
}
if (likely(!IS_ERR_OR_NULL(desc))) {
handle_irq(desc, regs); returntrue;
}
/* * Reevaluate with vector_lock held to prevent a race against * request_irq() setting up the vector: * * CPU0 CPU1 * interrupt is raised in APIC IRR * but not handled * free_irq() * per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN; * * request_irq() common_interrupt() * d = this_cpu_read(vector_irq[vector]); * * per_cpu(vector_irq, CPU1)[vector] = desc; * * if (d == VECTOR_SHUTDOWN) * this_cpu_write(vector_irq[vector], VECTOR_UNUSED); * * This requires that the same vector on the same target CPU is * handed out or that a spurious interrupt hits that CPU/vector.
*/
lock_vector_lock();
desc = reevaluate_vector(vector);
unlock_vector_lock();
if (!desc) returnfalse;
handle_irq(desc, regs); returntrue;
}
/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own entry points).
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
	struct pt_regs *prev_regs = set_irq_regs(regs);

	/* entry code tells RCU that we're not quiescent. Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	/* No handler took it: ack so the APIC slot does not hang. */
	if (unlikely(!call_irq_handler(vector, regs)))
		apic_eoi();

	set_irq_regs(prev_regs);
}
#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
	struct pt_regs *prev_regs = set_irq_regs(regs);

	apic_eoi();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);

	/* Dispatch to the platform callback, if one was registered. */
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	set_irq_regs(prev_regs);
}
#endif
/* * Performance data shows that 3 is good enough to harvest 90+% of the benefit * on high IRQ rate workload.
*/ #define MAX_POSTED_MSI_COALESCING_LOOP 3
/* * For MSIs that are delivered as posted interrupts, the CPU notifications * can be coalesced if the MSIs arrive in high frequency bursts.
*/
DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
{ struct pt_regs *old_regs = set_irq_regs(regs); struct pi_desc *pid; int i = 0;
/* * Max coalescing count includes the extra round of handle_pending_pir * after clearing the outstanding notification bit. Hence, at most * MAX_POSTED_MSI_COALESCING_LOOP - 1 loops are executed here.
*/ while (++i < MAX_POSTED_MSI_COALESCING_LOOP) { if (!handle_pending_pir(pid->pir, regs)) break;
}
/* * Clear outstanding notification bit to allow new IRQ notifications, * do this last to maximize the window of interrupt coalescing.
*/
pi_clear_on(pid);
/* * There could be a race of PI notification and the clearing of ON bit, * process PIR bits one last time such that handling the new interrupts * are not delayed until the next IRQ.
*/
handle_pending_pir(pid->pir, regs);
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		if (is_vector_pending(vector)) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			/* Replay the still-pending interrupt on its new target */
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.