/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
/* Discover the next online CPU */
cpu = cpumask_next(prev, cpu_online_mask);
/* If there isn't one, we're done */ if (cpu >= nr_cpu_ids) return cpu;
/* * Move the access lock to the next CPU's GIC local register block. * * Set GIC_VL_OTHER. Since the caller holds gic_lock nothing can * clobber the written value.
*/
write_gic_vl_other(mips_cm_vp_id(cpu));
return cpu;
}
/*
 * gic_unlock_cluster() - Release the CM other/redirect region lock, if held.
 *
 * Counterpart to the locking performed when iterating remote clusters.
 * On single-cluster systems no lock was taken, so this is a no-op there.
 */
static inline void gic_unlock_cluster(void)
{
	/* Only multi-cluster systems lock the CM other register region */
	if (mips_cps_multicluster_cpus())
		mips_cm_unlock_other();
}
/**
 * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
 * @cpu: An integer variable to hold the current CPU number
 * @gic_lock: A pointer to raw spin lock used as a guard
 *
 * Iterate over online CPUs & configure the other/redirect register region to
 * access each CPUs GIC local register block, which can be accessed from the
 * loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or
 * their derivatives.
 */
#define for_each_online_cpu_gic(cpu, gic_lock)		\
	guard(raw_spinlock_irqsave)(gic_lock);		\
	for ((cpu) = __gic_with_next_online_cpu(-1);	\
	     (cpu) < nr_cpu_ids;			\
	     gic_unlock_cluster(),			\
	     (cpu) = __gic_with_next_online_cpu(cpu))
/** * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster * @d: struct irq_data corresponding to the interrupt we're interested in * * Locks redirect register block access to the global register block of the GIC * within the remote cluster that the IRQ corresponding to @d is affine to, * returning true when this redirect block setup & locking has been performed. * * If @d is affine to the local cluster then no locking is performed and this * function will return false, indicating to the caller that it should access * the local clusters registers without the overhead of indirection through the * redirect block. * * In summary, if this function returns true then the caller should access GIC * registers using redirect register block accessors & then call * mips_cm_unlock_other() when done. If this function returns false then the * caller should trivially access GIC registers in the local cluster. * * Returns true if locking performed, else false.
*/ staticbool gic_irq_lock_cluster(struct irq_data *d)
{ unsignedint cpu, cl;
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
BUG_ON(cpu >= NR_CPUS);
cl = cpu_cluster(&cpu_data[cpu]); if (cl == cpu_cluster(¤t_cpu_data)) returnfalse; if (mips_cps_numcores(cl) == 0) returnfalse;
mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); returntrue;
}
/*
 * Return the Linux IRQ number to use for the CP0 count/compare timer.
 * Falls back to the fixed CPU IRQ line when the GIC cannot route the
 * local timer interrupt; otherwise maps it through the GIC IRQ domain.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
/*
 * Return the Linux IRQ number to use for the CP0 performance counter
 * interrupt, or -1 when the counter shares the timer interrupt line and
 * no dedicated IRQ exists. Routes through the GIC IRQ domain when the
 * GIC can deliver the local perf counter interrupt.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
int gic_get_c0_fdc_int(void)
{ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) { /* Is the FDC IRQ even present? */ if (cp0_fdc_irq < 0) return -1; return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
}
/* * The GIC specifies that we can only route an interrupt to one VP(E), * ie. CPU in Linux parlance, at a time. Therefore we always route to * the first forced or online CPU in the mask.
*/ if (force)
cpu = cpumask_first(cpumask); else
cpu = cpumask_first_and(cpumask, cpu_online_mask);
/* * If we're moving affinity between clusters, stop routing the * interrupt to any VP(E) in the old cluster.
*/ if (cl != old_cl) { if (gic_irq_lock_cluster(d)) {
write_gic_redir_map_vp(irq, 0);
mips_cm_unlock_other();
} else {
write_gic_map_vp(irq, 0);
}
}
/* * Update effective affinity - after this gic_irq_lock_cluster() will * begin operating on the new cluster.
*/
irq_data_update_effective_affinity(d, cpumask_of(cpu));
/* * If we're moving affinity between clusters, configure the interrupt * trigger type in the new cluster.
*/ if (cl != old_cl)
gic_set_type(d, irqd_get_trigger_type(d));
/* Route the interrupt to its new VP(E) */ if (gic_irq_lock_cluster(d)) {
write_gic_redir_map_pin(irq,
GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
/* Update the pcpu_masks */
gic_clear_pcpu_masks(irq); if (read_gic_redir_mask(irq))
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
for (i = 0; i < ARRAY_SIZE(local_intrs); i++) { unsignedint intr = local_intrs[i]; struct gic_all_vpes_chip_data *cd;
if (!gic_local_irq_is_routable(intr)) continue;
cd = &gic_all_vpes_chip_data[intr];
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
/* * If adding support for more per-cpu interrupts, keep the * array in gic_all_vpes_irq_cpu_online() in sync.
*/ switch (intr) { case GIC_LOCAL_INT_TIMER: case GIC_LOCAL_INT_PERFCTR: case GIC_LOCAL_INT_FDC: /* * HACK: These are all really percpu interrupts, but * the rest of the MIPS kernel code does not use the * percpu IRQ API for them.
*/
cd = &gic_all_vpes_chip_data[intr];
cd->map = map;
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_all_vpes_local_irq_controller,
cd); if (err) return err;
base_hwirq = find_first_bit(ipi_available, gic_shared_intrs); if (base_hwirq == gic_shared_intrs) return -ENOMEM;
/* check that we have enough space */ for (i = base_hwirq; i < nr_irqs; i++) { if (!test_bit(i, ipi_available)) return -EBUSY;
}
bitmap_clear(ipi_available, base_hwirq, nr_irqs);
/* map the hwirq for each cpu consecutively */
i = 0;
for_each_cpu(cpu, ipimask) {
hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
&gic_edge_irq_controller,
NULL); if (ret) goto error;
ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
&gic_edge_irq_controller,
NULL); if (ret) goto error;
/* Set affinity to cpu. */
irq_data_update_effective_affinity(irq_get_irq_data(virq + i),
cpumask_of(cpu));
ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING); if (ret) goto error;
ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); if (ret) goto error;
/* Find the first available CPU vector. */
i = 0;
reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
i++, &cpu_vec))
reserved |= BIT(cpu_vec);
cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); if (cpu_vec == hweight_long(ST0_IM)) {
pr_err("No CPU vectors available\n"); return -ENODEV;
}
if (of_address_to_resource(node, 0, &res)) { /* * Probe the CM for the GIC base address if not specified * in the device-tree.
*/ if (mips_cm_present()) {
gic_base = read_gcr_gic_base() &
~CM_GCR_GIC_BASE_GICEN;
gic_len = 0x20000;
pr_warn("Using inherited base address %pa\n",
&gic_base);
} else {
pr_err("Failed to get memory range\n"); return -ENODEV;
}
} else {
gic_base = res.start;
gic_len = resource_size(&res);
}
if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN); /* Ensure GIC region is enabled before trying to access it */
__sync();
}
mips_gic_base = ioremap(gic_base, gic_len); if (!mips_gic_base) {
pr_err("Failed to ioremap gic_base\n"); return -ENOMEM;
}
/* * Initialise each cluster's GIC shared registers to sane default * values. * Otherwise, the IPI set up will be erased if we move code * to gic_cpu_startup for each cpu.
*/
nclusters = mips_cps_numclusters(); for (cl = 0; cl < nclusters; cl++) { if (cl == cpu_cluster(¤t_cpu_data)) { for (i = 0; i < gic_shared_intrs; i++) {
change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_trig(i, GIC_TRIG_LEVEL);
write_gic_rmask(i);
}
} elseif (mips_cps_numcores(cl) != 0) {
mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); for (i = 0; i < gic_shared_intrs; i++) {
change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_redir_trig(i, GIC_TRIG_LEVEL);
write_gic_redir_rmask(i);
}
mips_cm_unlock_other();
} else {
pr_warn("No CPU cores on the cluster %d skip it\n", cl);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.