/*
 * NOTE(review): garbled extraction. The first lines are the start of
 * sunxi_core_is_cortex_a15() — a device-tree lookup of the CPU node for
 * cluster*SUNXI_CPUS_PER_CLUSTER+core — but its tail (the compatible
 * check that would set is_compatible and the return) is missing. The
 * trailing "reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST..." lines are loose
 * fragments of the core/cluster power-down reset sequences and reference
 * a 'reg' local declared elsewhere. Joined keywords ("staticbool",
 * "unsignedint") are extraction damage, not intent — restore spaces
 * before compiling. Code left byte-identical.
 */
staticbool sunxi_core_is_cortex_a15(unsignedint core, unsignedint cluster)
{ struct device_node *node; int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; bool is_compatible;
node = of_cpu_device_node_get(cpu);
/* In case of_cpu_device_node_get fails */ if (!node)
node = of_get_cpu_node(cpu, NULL);
if (!node) { /* * There's no point in returning an error, since we * would be mid way in a core or cluster power sequence.
*/
pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
__func__, cluster, core);
/* NOTE(review): the function's tail is missing from here; the lines
 * below belong to other (power-down) functions. */
/* * Allwinner code also asserts resets for NEON on A15. According * to ARM manuals, asserting power-on reset is sufficient.
*/ if (!sunxi_core_is_cortex_a15(cpu, cluster))
reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
/* * Allwinner code also asserts resets for NEON on A15. According * to ARM manuals, asserting power-on reset is sufficient.
*/ if (!sunxi_core_is_cortex_a15(0, cluster))
reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST_ALL;
/* * This bit is shared between the initial nocache_trampoline call to * enable CCI-400 and proper cluster cache disable before power down.
*/ staticvoid sunxi_cluster_cache_disable_without_axi(void)
{ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) { /* * On the Cortex-A15 we need to disable * L2 prefetching before flushing the cache.
*/ asmvolatile( "mcr p15, 1, %0, c15, c0, 3\n" "isb\n" "dsb"
: : "r" (0x400));
}
/* Flush all cache levels for this cluster. */
v7_exit_coherency_flush(all);
/* * Disable cluster-level coherency by masking * incoming snoops and DVM messages:
*/
cci_disable_port_by_cpu(read_cpuid_mpidr());
}
/*
 * Per-CPU bring-up state, one slot per possible core; zero in a slot
 * means that core is down (see sunxi_mc_smp_cluster_is_down()).
 */
static int sunxi_mc_smp_cpu_table[SUNXI_NR_CLUSTERS][SUNXI_CPUS_PER_CLUSTER];
/* Set while the first CPU runs the CCI-400 enable loopback/trampoline. */
int sunxi_mc_smp_first_comer;

/* Serializes accesses to sunxi_mc_smp_cpu_table across boot/die/kill. */
static DEFINE_SPINLOCK(boot_lock);
staticbool sunxi_mc_smp_cluster_is_down(unsignedint cluster)
{ int i;
for (i = 0; i < SUNXI_CPUS_PER_CLUSTER; i++) if (sunxi_mc_smp_cpu_table[cluster][i]) returnfalse; returntrue;
}
staticvoid sunxi_mc_smp_secondary_init(unsignedint cpu)
{ /* Clear hotplug support magic flags for cpu0 */ if (cpu == 0)
sunxi_cpu0_hotplug_support_set(false);
}
/*
 * NOTE(review): fragment — the enclosing function header was lost in
 * extraction. This appears to be the body of the CPU-die path: it looks
 * up the dying CPU's cluster/core from its MPIDR and, under boot_lock,
 * decrements its sunxi_mc_smp_cpu_table slot. "elseif" is extraction
 * damage for "else if". Code left byte-identical.
 */
mpidr = cpu_logical_map(l_cpu);
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
spin_lock(&boot_lock);
/* A count of 1 after the decrement means power-up raced ahead of us. */
sunxi_mc_smp_cpu_table[cluster][cpu]--; if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) { /* A power_up request went ahead of us. */
pr_debug("%s: aborting due to a power up request\n",
__func__);
spin_unlock(&boot_lock); return;
} elseif (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
pr_err("Cluster %d CPU%d boots multiple times\n",
cluster, cpu);
BUG();
}
/*
 * NOTE(review): fragment of the CPU-kill path (function header lost in
 * extraction). Bounds-checks the target, polls for the dying core to
 * clear its table slot, powers the core down, and powers the cluster
 * down once it is empty. The poll loop body appears truncated — the
 * per-core WFI status check and the 'break' that exits the loop are
 * missing from this extraction. Code left byte-identical.
 */
/* This should never happen */ if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
cpu >= SUNXI_CPUS_PER_CLUSTER)) return 0;
/* wait for CPU core to die and enter WFI */
count = TIMEOUT_USEC / POLL_USEC;
/* Drop the lock around each sleep so the die path can make progress. */
spin_lock_irq(&boot_lock); for (tries = 0; tries < count; tries++) {
spin_unlock_irq(&boot_lock);
usleep_range(POLL_USEC / 2, POLL_USEC);
spin_lock_irq(&boot_lock);
/* * If the user turns off a bunch of cores at the same * time, the kernel might call cpu_kill before some of * them are ready. This is because boot_lock serializes * both cpu_die and cpu_kill callbacks. Either one could * run first. We should wait for cpu_die to complete.
*/ if (sunxi_mc_smp_cpu_table[cluster][cpu]) continue;
/* NOTE(review): kernel convention is a negative errno; this likely
 * should be -ETIMEDOUT — confirm against the callers' contract. */
if (tries >= count) {
ret = ETIMEDOUT; goto out;
}
/* power down CPU core */
sunxi_cpu_powerdown(cpu, cluster);
if (!sunxi_mc_smp_cluster_is_down(cluster)) goto out;
/* wait for cluster L2 WFI */
ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
reg & CPUCFG_CX_STATUS_STANDBYWFIL2,
POLL_USEC, TIMEOUT_USEC); if (ret) { /* * Ignore timeout on the cluster. Leaving the cluster on * will not affect system execution, just use a bit more * power. But returning an error here will only confuse * the user as the CPU has already been shutdown.
*/
ret = 0; goto out;
}
/* Power down cluster */
sunxi_cluster_powerdown(cluster);
/*
 * NOTE(review): tail of the boot-CPU table initialisation (its header
 * and the MPIDR lookup that sets cluster/cpu were lost in extraction).
 * Records the boot CPU in sunxi_mc_smp_cpu_table after bounds-checking.
 * "returnfalse"/"returntrue" are extraction damage for "return false"/
 * "return true". Code left byte-identical.
 */
if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
pr_err("%s: boot CPU is out of bounds!\n", __func__); returnfalse;
}
sunxi_mc_smp_cpu_table[cluster][cpu] = 1; returntrue;
}
/*
 * Adapted from arch/arm/common/mc_smp_entry.c
 *
 * We need the trampoline code to enable CCI-400 on the first cluster
 */
typedef typeof(cpu_reset) phys_reset_t;
/*
 * Soft-restart the calling (boot) CPU through cpu_suspend() and the
 * nocache_trampoline so the CCI-400 port for the boot cluster gets
 * enabled. Runs with IRQs/FIQs disabled for the duration; the
 * sunxi_mc_smp_first_comer flag marks this first-comer pass.
 *
 * Returns 0 on success or the error from cpu_pm_enter()/cpu_suspend().
 */
static int __init sunxi_mc_smp_loopback(void)
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	sunxi_mc_smp_first_comer = true;
	local_irq_disable();
	local_fiq_disable();

	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend(0, nocache_trampoline);
		cpu_pm_exit();
	}

	local_fiq_enable();
	local_irq_enable();
	sunxi_mc_smp_first_comer = false;

	return ret;
}
/*
 * This holds any device nodes that we requested resources for,
 * so that we may easily release resources in the error path.
 */
struct sunxi_mc_smp_nodes {
	struct device_node *prcm_node;
	struct device_node *cpucfg_node;
	struct device_node *sram_node;
	struct device_node *r_cpucfg_node;
};
/* This structure holds SoC-specific bits tied to an enable-method string. */
struct sunxi_mc_smp_data {
	const char *enable_method;	/* DT "enable-method" value to match */
	int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
	bool is_a83t;			/* A83T uses R_CPUCFG instead of SRAM */
};
/*
 * NOTE(review): fragment of the driver init function — its prototype,
 * local declarations, the final 'return 0', and the err_* cleanup labels
 * targeted by the gotos below were all lost in extraction. Flow: match
 * the boot CPU's enable-method against sunxi_mc_smp_data, init the CPU
 * table, map PRCM/CPUCFG (and R-CPUCFG or secure SRAM depending on SoC),
 * run the CCI-400 loopback, then program the secondary entry point and
 * install the SMP ops. Code left byte-identical.
 */
/* * Don't bother checking the "cpus" node, as an enable-method * property in that node is undocumented.
*/
node = of_cpu_device_node_get(0); if (!node) return -ENODEV;
/* * We can't actually use the enable-method magic in the kernel. * Our loopback / trampoline code uses the CPU suspend framework, * which requires the identity mapping be available. It would not * yet be available if we used the .init_cpus or .prepare_cpus * callbacks in smp_operations, which we would use if we were to * use CPU_METHOD_OF_DECLARE
*/ for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
ret = of_property_match_string(node, "enable-method",
sunxi_mc_smp_data[i].enable_method); if (ret >= 0) break;
}
of_node_put(node); if (ret < 0) return -ENODEV;
is_a83t = sunxi_mc_smp_data[i].is_a83t;
if (!sunxi_mc_smp_cpu_table_init()) return -EINVAL;
if (!cci_probed()) {
pr_err("%s: CCI-400 not available\n", __func__); return -ENODEV;
}
/* Get needed device tree nodes */
ret = sunxi_mc_smp_data[i].get_smp_nodes(&nodes); if (ret) goto err_put_nodes;
/* * Unfortunately we can not request the I/O region for the PRCM. * It is shared with the PRCM clock.
*/
prcm_base = of_iomap(nodes.prcm_node, 0); if (!prcm_base) {
pr_err("%s: failed to map PRCM registers\n", __func__);
ret = -ENOMEM; goto err_put_nodes;
}
cpucfg_base = of_io_request_and_map(nodes.cpucfg_node, 0, "sunxi-mc-smp"); if (IS_ERR(cpucfg_base)) {
ret = PTR_ERR(cpucfg_base);
pr_err("%s: failed to map CPUCFG registers: %d\n",
__func__, ret); goto err_unmap_prcm;
}
/* A83T exposes the soft-entry register via R-CPUCFG; others via SRAM. */
if (is_a83t) {
r_cpucfg_base = of_io_request_and_map(nodes.r_cpucfg_node,
0, "sunxi-mc-smp"); if (IS_ERR(r_cpucfg_base)) {
ret = PTR_ERR(r_cpucfg_base);
pr_err("%s: failed to map R-CPUCFG registers\n",
__func__); goto err_unmap_release_cpucfg;
}
} else {
sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0, "sunxi-mc-smp"); if (IS_ERR(sram_b_smp_base)) {
ret = PTR_ERR(sram_b_smp_base);
pr_err("%s: failed to map secure SRAM\n", __func__); goto err_unmap_release_cpucfg;
}
}
/* Configure CCI-400 for boot cluster */
ret = sunxi_mc_smp_loopback(); if (ret) {
pr_err("%s: failed to configure boot cluster: %d\n",
__func__, ret); goto err_unmap_release_sram_rcpucfg;
}
/* We don't need the device nodes anymore */
sunxi_mc_smp_put_nodes(&nodes);
/* Set the hardware entry point address */ if (is_a83t)
addr = r_cpucfg_base + R_CPUCFG_CPU_SOFT_ENTRY_REG; else
addr = prcm_base + PRCM_CPU_SOFT_ENTRY_REG;
writel(__pa_symbol(sunxi_mc_smp_secondary_startup), addr);
/* Actually enable multi cluster SMP */
smp_set_ops(&sunxi_mc_smp_smp_ops);
pr_info("sunxi multi cluster SMP support installed\n");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.