// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Limited, All Rights Reserved. * * Interrupt architecture for the GIC: * * o There is one Interrupt Distributor, which receives interrupts * from system devices and sends them to the Interrupt Controllers. * * o There is one CPU Interface per CPU, which sends interrupts sent * by the Distributor, and interrupts generated locally, to the * associated CPU. The base address of the CPU interface is usually * aliased so that the same address points to different chips depending * on the CPU it is accessed from. * * Note that IRQs 0-31 are special - they are local to each CPU. * As such, the enable set/clear, pending set/clear and active bit * registers are banked per-cpu for these sources.
*/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/err.h> #include <linux/module.h> #include <linux/list.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/cpu_pm.h> #include <linux/cpumask.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/acpi.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqchip/arm-gic.h>
/*
 * NOTE(review): this reads like the non-BL_SWITCHER branch of a conditional
 * whose #if half is not visible in this chunk; the #endif below closes it.
 * The original text fused both #defines onto a single physical line, which
 * would have folded the second directive into the first macro's replacement
 * list -- each directive must live on its own line.
 */
/* No IRQ-migration locking needed in this configuration: no-op stand-ins. */
#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)

#define gic_lock()			do { } while(0)
#define gic_unlock()			do { } while(0)
#endif
static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);
/* * The GIC mapping of CPU interfaces does not necessarily match * the logical CPU numbering. Let's use a mapping as returned * by the GIC itself.
*/ #define NR_GIC_CPU_IF 8 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
/*
 * Mask an interrupt when running in EOImode==1 (split EOI/Deactivate).
 *
 * @d: irq_data of the interrupt being masked.
 */
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * noone to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}
	/*
	 * NOTE(review): this is the tail of a set_type callback; the
	 * enclosing function's signature and locals (gicirq, type, base,
	 * ret) are not visible in this chunk.
	 */
	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG);
	if (ret && gicirq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}
/*
 * Mark/unmark an interrupt as forwarded to a vCPU.
 *
 * @d:    irq_data of the interrupt.
 * @vcpu: non-NULL to set the forwarded state, NULL to clear it.
 *
 * Returns 0 on success, -EINVAL for SGIs or interrupts on a cascaded GIC.
 */
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
		/*
		 * NOTE(review): tail of the top-level IRQ handling loop; the
		 * enclosing function's head and the GICC_IAR read that sets
		 * irqstat/irqnr are not visible in this chunk.
		 */
		if (static_branch_likely(&supports_deactivate_key))
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
		isb();

		/*
		 * Ensure any shared data written by the CPU sending the IPI
		 * is read after we've read the ACK register on the GIC.
		 *
		 * Pairs with the write barrier in gic_ipi_send_mask
		 */
		if (irqnr <= 15) {
			smp_rmb();

			/*
			 * The GIC encodes the source CPU in GICC_IAR,
			 * leading to the deactivation to fail if not
			 * written back as is to GICC_EOI. Stash the INTID
			 * away for gic_eoi_irq() to write back. This only
			 * works because we don't nest SGIs...
			 */
			this_cpu_write(sgi_intid, irqstat);
		}

		generic_handle_domain_irq(gic->domain, irqnr);
	} while (1);
}
	/*
	 * NOTE(review): interior of the distributor-init routine; the
	 * enclosing function head and locals (base, cpumask, gic_irqs, i)
	 * are not visible in this chunk.
	 */
	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	/* Replicate the 8-bit CPU mask into all four byte lanes. */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	/* One target byte per interrupt, four interrupts per 32-bit register. */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		/*
		 * NOTE(review): the assignment that sets cpu_mask (and
		 * records it in gic_cpu_map[cpu]) is missing from this
		 * chunk -- as shown, cpu_mask is used uninitialized below.
		 * Confirm against the full file.
		 */
		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	/* NOTE(review): third argument differs from mainline (NULL there) -- verify. */
	gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}
int gic_cpu_if_down(unsignedint gic_nr)
{ void __iomem *cpu_base;
u32 val = 0;
if (gic_nr >= CONFIG_ARM_GIC_MAX_NR) return -EINVAL;
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
val = readl(cpu_base + GIC_CPU_CTRL);
val &= ~GICC_ENABLE;
writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
return 0;
}
#ifdefined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM) /* * Saves the GIC distributor registers during suspend or idle. Must be called * with interrupts disabled but before powering down the GIC. After calling * this function, no interrupts will be delivered by the GIC, and another * platform-specific wakeup source must be enabled.
*/ void gic_dist_save(struct gic_chip_data *gic)
{ unsignedint gic_irqs; void __iomem *dist_base; int i;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
gic->saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
gic->saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic->saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic->saved_spi_active[i] =
readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	/*
	 * NOTE(review): the loop below does not belong to gic_dist_restore.
	 * It reads like the dispatch body of a CPU PM notifier (it switches
	 * on a 'cmd' variable this function never declares) that was spliced
	 * in here by whatever mangled this file.  The real register-restore
	 * sequence is missing from this chunk -- recover both functions from
	 * the upstream file before building.  Only the fused
	 * "unsignedint" tokens have been repaired here.
	 */
	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}
	/*
	 * NOTE(review): interior of an IPI-send routine; the enclosing
	 * function head (declaring d, mask, cpu, map, flags) and the
	 * trailing unlock are not visible in this chunk.
	 */
	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | d->hwirq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | d->hwirq,
		       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq,
		       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
/* * gic_get_cpu_id - get the CPU interface ID for the specified CPU * * @cpu: the logical CPU number to get the GIC ID for. * * Return the CPU interface ID for the given logical CPU number, * or -1 if the CPU number is too large or the interface ID is * unknown (more than one bit set).
*/ int gic_get_cpu_id(unsignedint cpu)
{ unsignedint cpu_bit;
if (cpu >= NR_GIC_CPU_IF) return -1;
cpu_bit = gic_cpu_map[cpu]; if (cpu_bit & (cpu_bit - 1)) return -1; return __ffs(cpu_bit);
}
/* * gic_migrate_target - migrate IRQs to another CPU interface * * @new_cpu_id: the CPU target ID to migrate IRQs to * * Migrate all peripheral interrupts with a target matching the current CPU * to the interface corresponding to @new_cpu_id. The CPU interface mapping * is also updated. Targets to other CPU interfaces are unchanged. * This must be called with IRQs locally disabled.
*/ void gic_migrate_target(unsignedint new_cpu_id)
{ unsignedint cur_cpu_id, gic_irqs, gic_nr = 0; void __iomem *dist_base; int i, ror_val, cpu = smp_processor_id();
u32 val, cur_target_mask, active_mask;
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
dist_base = gic_data_dist_base(&gic_data[gic_nr]); if (!dist_base) return;
gic_irqs = gic_data[gic_nr].gic_irqs;
/* Update the target interface for this logical CPU */
gic_cpu_map[cpu] = 1 << new_cpu_id;
/* * Find all the peripheral interrupts targeting the current * CPU interface and migrate them to the new CPU interface. * We skip DIST_TARGET 0 to 7 as they are read-only.
*/ for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
active_mask = val & cur_target_mask; if (active_mask) {
val &= ~active_mask;
val |= ror32(active_mask, ror_val);
writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
}
}
gic_unlock();
/* * Now let's migrate and clear any potential SGIs that might be * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET * is a banked register, we can only forward the SGI using * GIC_DIST_SOFTINT. The original SGI source is lost but Linux * doesn't use that information anyway. * * For the same reason we do not adjust SGI source information * for previously sent SGIs by us to other CPUs either.
*/ for (i = 0; i < 16; i += 4) { int j;
val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i); if (!val) continue;
writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i); for (j = i; j < i + 4; j++) { if (val & 0xff)
writel_relaxed((1 << (new_cpu_id + 16)) | j,
dist_base + GIC_DIST_SOFTINT);
val >>= 8;
}
}
}
/*
 * NOTE(review): the comment below documents gic_get_sgir_physaddr(), whose
 * body is not visible in this chunk; only the backing variable survives.
 *
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;
	/*
	 * NOTE(review): interior and tail of a base-init routine; the
	 * function head and the declarations of gic, handle and ret are
	 * not visible in this chunk.
	 */
	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic->domain = irq_domain_create_linear(handle, gic_irqs,
					       &gic_irq_domain_hierarchy_ops,
					       gic);
	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	/* Per-cpu bases are only allocated in the non-banked configuration. */
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}
/*
 * __gic_init_bases - common (DT and ACPI) final initialisation of a GIC
 *
 * @gic:    instance to initialise; must not already have an IRQ domain.
 * @handle: fwnode handle used to create the IRQ domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	ret = gic_init_bases(gic, handle);
	if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}
/*
 * gic_teardown - release the raw I/O mappings made while probing a GIC
 *
 * @gic: instance whose distributor/CPU-interface mappings are unmapped.
 */
static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}
if (!is_hyp_mode_available()) returnfalse; if (resource_size(&cpuif_res) < SZ_8K) { void __iomem *alt; /* * Check for a stupid firmware that only exposes the * first page of a GICv2.
*/ if (!gic_check_gicv2(*base)) returnfalse;
if (!gicv2_force_probe) {
pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n"); returnfalse;
}
alt = ioremap(cpuif_res.start, SZ_8K); if (!alt) returnfalse; if (!gic_check_gicv2(alt + SZ_4K)) { /* * The first page was that of a GICv2, and * the second was *something*. Let's trust it * to be a GICv2, and update the mapping.
*/
pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
&cpuif_res.start);
iounmap(*base);
*base = alt; returntrue;
}
/* * We detected *two* initial GICv2 pages in a * row. Could be a GICv2 aliased over two 64kB * pages. Update the resource, map the iospace, and * pray.
*/
iounmap(alt);
alt = ioremap(cpuif_res.start, SZ_128K); if (!alt) returnfalse;
pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
&cpuif_res.start);
cpuif_res.end = cpuif_res.start + SZ_128K -1;
iounmap(*base);
*base = alt;
} if (resource_size(&cpuif_res) == SZ_128K) { /* * Verify that we have the first 4kB of a GICv2 * aliased over the first 64kB by checking the * GICC_IIDR register on both ends.
*/ if (!gic_check_gicv2(*base) ||
!gic_check_gicv2(*base + 0xf000)) returnfalse;
/* * Move the base up by 60kB, so that we have a 8kB * contiguous region, which allows us to use GICC_DIR * at its normal offset. Please pass me that bucket.
*/
*base += 0xf000;
cpuif_res.start += 0xf000;
pr_warn("GIC: Adjusting CPU interface base to %pa\n",
&cpuif_res.start);
}
returntrue;
}
staticbool gic_enable_rmw_access(void *data)
{ /* * The EMEV2 class of machines has a broken interconnect, and * locks up on accesses that are less than 32bit. So far, only * the affinity setting requires it.
*/ if (of_machine_is_compatible("renesas,emev2")) {
static_branch_enable(&needs_rmw_access); returntrue;
}
	/*
	 * NOTE(review): tail of the KVM-info setup routine; the function
	 * head and the declarations of node, ret, vctrl_res and vcpu_res
	 * are not visible in this chunk.
	 */
	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	if (static_branch_likely(&supports_deactivate_key))
		vgic_set_kvm_info(&gic_v2_kvm_info);
}
/*
 * gic_of_init - probe and initialise a GIC described by the device tree
 *
 * @node:   the GIC's DT node.
 * @parent: parent interrupt controller node; non-NULL for a cascaded GIC.
 *
 * Returns 0 on success or a negative errno.
 */
int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	/* NOTE(review): restored -- the original text lost this tail. */
	gic_cnt++;
	return 0;
}
	/*
	 * NOTE(review): fragment of the ACPI MADT GICC parser; the function
	 * head (declaring processor, end, gic_cpu_base, cpu_base_assigned)
	 * is not visible in this chunk.
	 */
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 register in ACPI spec.
	 * All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;
/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}
	/*
	 * NOTE(review): interior of the ACPI init path; the enclosing
	 * function head and the declarations of gic, dist, ret and
	 * gsi_domain_handle are not visible in this chunk.
	 */
	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, gsi_domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(gsi_domain_handle);
		gic_teardown(gic);
		return ret;
	}
/*
 * NOTE(review): the text below is an extraction artifact -- German web-page
 * boilerplate that is not part of this driver.  Preserved here, translated
 * into English and wrapped as a comment so the file remains valid C:
 *
 * "The information on this web page has been compiled carefully and to the
 *  best of our knowledge.  However, neither the completeness, nor the
 *  correctness, nor the quality of the information provided is guaranteed.
 *  Note: the syntax highlighting and the measurement are still experimental."
 */