/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arch/arm/cluster-pm-race-avoidance.rst.
 */
struct sync_struct mcpm_sync;
/* * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. * This must be called at the point of committing to teardown of a CPU. * The CPU cache (SCTRL.C bit) is expected to still be active.
*/ staticvoid __mcpm_cpu_going_down(unsignedint cpu, unsignedint cluster)
{
mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}
/* * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the * cluster can be torn down without disrupting this CPU. * To avoid deadlocks, this must be called before a CPU is powered down. * The CPU cache (SCTRL.C bit) is expected to be off. * However L2 cache might or might not be active.
*/ staticvoid __mcpm_cpu_down(unsignedint cpu, unsignedint cluster)
{
dmb();
mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
sev();
}
/* * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. * @state: the final state of the cluster: * CLUSTER_UP: no destructive teardown was done and the cluster has been * restored to the previous state (CPU cache still active); or * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off * (CPU cache disabled, L2 cache either enabled or disabled).
*/ staticvoid __mcpm_outbound_leave_critical(unsignedint cluster, int state)
{
dmb();
mcpm_sync.clusters[cluster].cluster = state;
sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
sev();
}
/* * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. * This function should be called by the last man, after local CPU teardown * is complete. CPU cache expected to be active. * * Returns: * false: the critical section was not entered because an inbound CPU was * observed, or the cluster is already being set up; * true: the critical section was entered: it is now safe to tear down the * cluster.
*/ staticbool __mcpm_outbound_enter_critical(unsignedint cpu, unsignedint cluster)
{ unsignedint i; struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
/* Warn inbound CPUs that the cluster is being torn down: */
c->cluster = CLUSTER_GOING_DOWN;
sync_cache_w(&c->cluster);
/* Back out if the inbound cluster is already in the critical region: */
sync_cache_r(&c->inbound); if (c->inbound == INBOUND_COMING_UP) goto abort;
/* * Wait for all CPUs to get out of the GOING_DOWN state, so that local * teardown is complete on each CPU before tearing down the cluster. * * If any CPU has been woken up again from the DOWN state, then we * shouldn't be taking the cluster down at all: abort in that case.
*/
sync_cache_r(&c->cpus); for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { int cpustate;
if (i == cpu) continue;
while (1) {
cpustate = c->cpus[i].cpu; if (cpustate != CPU_GOING_DOWN) break;
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
staticinlinebool mcpm_cluster_unused(unsignedint cluster)
{ int i, cnt; for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
cnt |= mcpm_cpu_use_count[cluster][i]; return !cnt;
}
int mcpm_cpu_power_up(unsignedint cpu, unsignedint cluster)
{ bool cpu_is_down, cluster_is_down; int ret = 0;
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (!platform_ops) return -EUNATCH; /* try not to shadow power_up errors */
might_sleep();
/* * Since this is called with IRQs enabled, and no arch_spin_lock_irq * variant exists, we need to disable IRQs manually here.
*/
local_irq_disable();
arch_spin_lock(&mcpm_lock);
mcpm_cpu_use_count[cluster][cpu]++; /* * The only possible values are: * 0 = CPU down * 1 = CPU (still) up * 2 = CPU requested to be up before it had a chance * to actually make itself down. * Any other value is a bug.
*/
BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
mcpm_cpu_use_count[cluster][cpu] != 2);
if (cluster_is_down)
ret = platform_ops->cluster_powerup(cluster); if (cpu_is_down && !ret)
ret = platform_ops->cpu_powerup(cpu, cluster);
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
platform_ops->cpu_powerdown_prepare(cpu, cluster);
platform_ops->cluster_powerdown_prepare(cluster);
arch_spin_unlock(&mcpm_lock);
platform_ops->cluster_cache_disable();
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
} else { if (cpu_going_down)
platform_ops->cpu_powerdown_prepare(cpu, cluster);
arch_spin_unlock(&mcpm_lock); /* * If cpu_going_down is false here, that means a power_up * request raced ahead of us. Even if we do not want to * shut this CPU down, the caller still expects execution * to return through the system resume entry path, like * when the WFI is aborted due to a new IRQ or the like.. * So let's continue with cache cleaning in all cases.
*/
platform_ops->cpu_cache_disable();
}
__mcpm_cpu_down(cpu, cluster);
/* Now we are prepared for power-down, do it: */ if (cpu_going_down)
wfi();
/* * It is possible for a power_up request to happen concurrently * with a power_down request for the same CPU. In this case the * CPU might not be able to actually enter a powered down state * with the WFI instruction if the power_up request has removed * the required reset condition. We must perform a re-entry in * the kernel as if the power_up method just had deasserted reset * on the CPU.
*/
phys_reset = (phys_reset_t)(unsignedlong)__pa_symbol(cpu_reset);
phys_reset(__pa_symbol(mcpm_entry_point), false);
/* should never get here */
BUG();
}
int mcpm_wait_for_cpu_powerdown(unsignedint cpu, unsignedint cluster)
{ int ret;
if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown)) return -EUNATCH;
ret = platform_ops->wait_for_powerdown(cpu, cluster); if (ret)
pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
__func__, cpu, cluster, ret);
return ret;
}
/*
 * mcpm_cpu_suspend: suspend the calling CPU via the MCPM power-down path,
 * giving the platform backend a chance to set up resume modes first.
 */
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
if (first_man && platform_ops->cluster_is_up)
platform_ops->cluster_is_up(cluster); if (cpu_was_down)
mcpm_cpu_use_count[cluster][cpu] = 1; if (platform_ops->cpu_is_up)
platform_ops->cpu_is_up(cpu, cluster);
/*
 * mcpm_loopback: soft-restart the current CPU through the low-level MCPM
 * code with @cache_disable applied on the way down, to exercise the
 * power-down/power-up path once at boot.  Returns 0 on success or a
 * negative error from cpu_pm_enter()/cpu_suspend().
 */
int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}
#endif
/* Physical address of the power_up_setup hook, consumed by the entry code. */
extern unsigned long mcpm_power_up_setup_phys;
/*
 * mcpm_sync_init: initialise the MCPM synchronisation state machine and
 * optionally register @power_up_setup as the early power-up hook.
 * Marks all clusters/CPUs down, then flags the boot cluster and all
 * currently online CPUs as up.  Always returns 0.
 */
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i) {
		mcpm_cpu_use_count[this_cluster][i] = 1;
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	}
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	/* Make the initial state visible to non-coherent observers. */
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}