/*
 * Overall diagram of the Armada XP interrupt controller:
 *
 *    To CPU 0                 To CPU 1
 *
 *       /\                       /\
 *       ||                       ||
 * +---------------+     +---------------+
 * |               |     |               |
 * |    per-CPU    |     |    per-CPU    |
 * |  mask/unmask  |     |  mask/unmask  |
 * |     CPU0      |     |     CPU1      |
 * |               |     |               |
 * +---------------+     +---------------+
 *       /\                       /\
 *       ||                       ||
 *       \\_______________________//
 *                   ||
 *        +-------------------+
 *        |                   |
 *        | Global interrupt  |
 *        |    mask/unmask    |
 *        |                   |
 *        +-------------------+
 *                   /\
 *                   ||
 *             interrupt from
 *                 device
 *
 * The "global interrupt mask/unmask" is modified using the
 * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE
 * registers, which are relative to "mpic->base".
 *
 * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
 * and MPIC_INT_CLEAR_MASK registers, which are relative to
 * "mpic->per_cpu". This base address points to a special address,
 * which automatically accesses the registers of the current CPU.
 *
 * The per-CPU mask/unmask can also be adjusted using the global
 * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
 * configure interrupt affinity.
 *
 * Due to this model, all interrupts need to be mask/unmasked at two
 * different levels: at the global level and at the per-CPU level.
 *
 * This driver takes the following approach to deal with this:
 *
 *  - For global interrupts:
 *
 *    At ->map() time, a global interrupt is unmasked at the per-CPU
 *    mask/unmask level. It is therefore unmasked at this level for
 *    the current CPU, running the ->map() code. This allows to have
 *    the interrupt unmasked at this level in non-SMP
 *    configurations. In SMP configurations, the ->set_affinity()
 *    callback is called, which using the MPIC_INT_SOURCE_CTL()
 *    readjusts the per-CPU mask/unmask for the interrupt.
 *
 *    The ->mask() and ->unmask() operations only mask/unmask the
 *    interrupt at the "global" level.
 *
 *    So, a global interrupt is enabled at the per-CPU level as soon
 *    as it is mapped. At run time, the masking/unmasking takes place
 *    at the global level.
 *
 *  - For per-CPU interrupts
 *
 *    At ->map() time, a per-CPU interrupt is unmasked at the global
 *    mask/unmask level.
 *
 *    The ->mask() and ->unmask() operations mask/unmask the interrupt
 *    at the per-CPU level.
 *
 *    So, a per-CPU interrupt is enabled at the global level as soon
 *    as it is mapped. At run time, the masking/unmasking takes place
 *    at the per-CPU level.
 */
/**
 * struct mpic - MPIC private data structure
 * @base: MPIC registers base address
 * @per_cpu: per-CPU registers base address
 * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller
 * @domain: MPIC main interrupt domain
 * @ipi_domain: IPI domain
 * @msi_inner_domain: MSI inner domain
 * @msi_used: bitmap of used MSI numbers
 * @msi_lock: mutex serializing access to @msi_used
 * @msi_doorbell_addr: physical address of MSI doorbell register
 * @msi_doorbell_mask: mask of available doorbell bits for MSIs (either
 *                     PCI_MSI_DOORBELL_MASK or PCI_MSI_FULL_DOORBELL_MASK)
 * @msi_doorbell_start: first set bit in @msi_doorbell_mask
 * @msi_doorbell_size: number of set bits in @msi_doorbell_mask
 * @doorbell_mask: doorbell mask of MSIs and IPIs, stored on suspend,
 *                 restored on resume
 */
struct mpic {
	void __iomem *base;
	void __iomem *per_cpu;
	int parent_irq;
	struct irq_domain *domain;
#ifdef CONFIG_SMP
	struct irq_domain *ipi_domain;
#endif
#ifdef CONFIG_PCI_MSI
	struct irq_domain *msi_inner_domain;
	DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
	struct mutex msi_lock;
	phys_addr_t msi_doorbell_addr;
	u32 msi_doorbell_mask;
	/* was "unsignedint" in the corrupted source — missing space fixed */
	unsigned int msi_doorbell_start, msi_doorbell_size;
#endif
	u32 doorbell_mask;
};
staticstruct mpic *mpic_data __ro_after_init;
staticinlinebool mpic_is_ipi_available(struct mpic *mpic)
{ /* * We distinguish IPI availability in the IC by the IC not having a * parent irq defined. If a parent irq is defined, there is a parent * interrupt controller (e.g. GIC) that takes care of inter-processor * interrupts.
*/ return mpic->parent_irq <= 0;
}
/*
 * NOTE(review): everything from here to the closing brace below is a
 * corrupted concatenation of fragments from SEVERAL different functions.
 * Only the mpic_irq_mask() signature survives; its real body is missing,
 * and the statements that follow reference identifiers (domain, virq,
 * nr_irqs, irqsrc, cpuid, node, ...) that are not declared in this scope.
 * This region will not compile as-is and needs to be reconstructed from
 * the upstream irq-armada-370-xp driver. Code left byte-identical;
 * comments below only mark the apparent fragment boundaries.
 */
/* * In SMP mode: * For shared global interrupts, mask/unmask global enable bit * For CPU interrupts, mask/unmask the calling CPU's bit
 */ staticvoid mpic_irq_mask(struct irq_data *d)
{ struct mpic *mpic = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
/*
 * NOTE(review): fragment boundary — the loop below appears to come from
 * an MSI inner-domain ->alloc() callback (irq_domain_set_info over a
 * range of virqs), not from mpic_irq_mask(); "domain", "virq" and
 * "nr_irqs" are undeclared here — TODO confirm against upstream.
 */
for (unsignedint i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
&mpic_msi_bottom_irq_chip,
domain->host_data, handle_simple_irq,
NULL, NULL);
}
/*
 * NOTE(review): fragment boundary — the compatible check below appears
 * to come from a performance-counter init helper gated on Armada 370/XP.
 */
/* * This Performance Counter Overflow interrupt is specific for * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
 */ if (!of_machine_is_compatible("marvell,armada-370-xp")) return;
/*
 * NOTE(review): fragment boundary — the mask test and MSI dispatch below
 * appear to come from a doorbell/cascade IRQ handling loop; "irqsrc",
 * "cpuid" and "i" are undeclared here — TODO confirm against upstream.
 */
/* Check if the interrupt is not masked on current CPU. * Test IRQ (0-1) and FIQ (8-9) mask bits.
 */ if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid))) continue;
if (i == 0 || i == 1) {
mpic_handle_msi_irq(mpic); continue;
}
/*
 * NOTE(review): fragment boundary — the re-enable loop and doorbell
 * reconfiguration below appear to come from a system-resume handler
 * that walks every hwirq and restores global/per-CPU enables.
 */
/* Re-enable interrupts */ for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { unsignedint virq = irq_find_mapping(mpic->domain, i); struct irq_data *d;
if (!virq) continue;
d = irq_get_irq_data(virq);
if (!mpic_is_percpu_irq(i)) { /* Non per-CPU interrupts */
writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); if (!irqd_irq_disabled(d))
mpic_irq_unmask(d);
} else { /* Per-CPU interrupts */
writel(i, mpic->base + MPIC_INT_SET_ENABLE);
/* * Re-enable on the current CPU, mpic_reenable_percpu() * will take care of secondary CPUs when they come up.
 */ if (irq_percpu_is_enabled(virq))
mpic_irq_unmask(d);
}
}
/* Reconfigure doorbells for IPIs and MSIs */
writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
/*
 * NOTE(review): fragment boundary — the CLEAR_ENABLE loop below looks
 * like controller initialization (mask everything globally); "nr_irqs"
 * is again undeclared in this scope — TODO confirm.
 */
for (irq_hw_number_t i = 0; i < nr_irqs; i++)
writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);
/*
 * NOTE(review): fragment boundary — from here down appears to come from
 * the probe/of_init path (uses "node", sizes the linear domain, returns
 * -ENOMEM), i.e. an int-returning init function, not mpic_irq_mask().
 */
/* * Initialize mpic->parent_irq before calling any other functions, since * it is used to distinguish between IPI and non-IPI platforms.
 */
mpic->parent_irq = irq_of_parse_and_map(node, 0);
/* * On non-IPI platforms the driver currently supports only the per-CPU * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq().
 */ if (!mpic_is_ipi_available(mpic))
nr_irqs = MPIC_PER_CPU_IRQS_NR;
mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic); if (!mpic->domain) {
pr_err("%pOF: Unable to add IRQ domain\n", node); return -ENOMEM;
}
/*
 * NOTE(review): residual non-code boilerplate (a German website
 * disclaimer) was accidentally appended to this source file; wrapped in
 * a comment so the file stays valid C. Original text preserved below:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 *
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */