qoriq_pm_ops->freeze_time_base(true); #ifdef CONFIG_PPC64 /* * e5500/e6500 have a workaround for erratum A-006958 in place * that will reread the timebase until TBL is non-zero. * That would be a bad thing when the timebase is frozen. * * Thus, we read it manually, and instead of checking that * TBL is non-zero, we ensure that TB does not change. We don't * do that for the main mftb implementation, because it requires * a scratch register
*/
{
u64 prev;
staticvoid qoriq_cpu_kill(unsignedint cpu)
{ int i;
for (i = 0; i < 500; i++) { if (is_cpu_dead(cpu)) { #ifdef CONFIG_PPC64
paca_ptrs[cpu]->cpu_start = 0; #endif return;
}
msleep(20);
}
pr_err("CPU%d didn't die...\n", cpu);
} #endif
/*
 * To keep it compatible with old boot program which uses
 * cache-inhibit spin table, we need to flush the cache
 * before accessing spin table to invalidate any staled data.
 * We also need to flush the cache after writing to spin
 * table to push data out.
 */
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}
staticint smp_85xx_start_cpu(int cpu)
{ int ret = 0; struct device_node *np; const u64 *cpu_rel_addr; unsignedlong flags; int ioremappable; int hw_cpu = get_hard_smp_processor_id(cpu); struct epapr_spin_table __iomem *spin_table;
np = of_get_cpu_node(cpu, NULL);
cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL); if (!cpu_rel_addr) {
pr_err("No cpu-release-addr for cpu %d\n", cpu); return -ENOENT;
}
/* * A secondary core could be in a spinloop in the bootpage * (0xfffff000), somewhere in highmem, or somewhere in lowmem. * The bootpage and highmem can be accessed via ioremap(), but * we need to directly access the spinloop if its in lowmem.
*/
ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1);
/* Map the spin table */ if (ioremappable)
spin_table = ioremap_coherent(*cpu_rel_addr, sizeof(struct epapr_spin_table)); else
spin_table = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
hard_irq_disable();
if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(cpu);
/* if cpu is not spinning, reset it */ if (read_spin_table_addr_l(spin_table) != 1) { /* * We don't set the BPTR register here since it already points * to the boot page properly.
*/
mpic_reset_core(cpu);
/* * wait until core is ready... * We need to invalidate the stale data, in case the boot * loader uses a cache-inhibited spin table.
*/ if (!spin_event_timeout(
read_spin_table_addr_l(spin_table) == 1,
10000, 100)) {
pr_err("timeout waiting for cpu %d to reset\n",
hw_cpu);
ret = -EAGAIN; goto err;
}
}
flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu); #ifdef CONFIG_PPC64
out_be64((u64 *)(&spin_table->addr_h),
__pa(ppc_function_entry(generic_secondary_smp_init))); #else #ifdef CONFIG_PHYS_ADDR_T_64BIT /* * We need also to write addr_h to spin table for systems * in which their physical memory start address was configured * to above 4G, otherwise the secondary core can not get * correct entry to start from.
*/
out_be32(&spin_table->addr_h, __pa(__early_start) >> 32); #endif
out_be32(&spin_table->addr_l, __pa(__early_start)); #endif
flush_spin_table(spin_table);
err:
local_irq_restore(flags);
if (ioremappable)
iounmap(spin_table);
return ret;
}
/*
 * Boot secondary CPU 'nr'.
 *
 * NOTE(review): this function is truncated in the visible region — the
 * tail after the smp_85xx_start_cpu() call (presumably a done: label,
 * the non-PPC64 branch, and the closing brace) is not shown, so only
 * the visible portion is documented here.
 *
 * On PPC64 with two HW threads per core: if either thread of the target
 * core is already online, ask it to wake its sibling via an IPI;
 * otherwise fall through and start the primary thread, which then
 * starts the thread recorded in booting_thread_hwid.
 */
staticint smp_85xx_kick_cpu(int nr)
{ int ret = 0; #ifdef CONFIG_PPC64 int primary = nr; #endif
WARN_ON(nr < 0 || nr >= num_possible_cpus());
pr_debug("kick CPU #%d\n", nr);
#ifdef CONFIG_PPC64 if (threads_per_core == 2) { if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT))) return -ENOENT;
if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(nr);
/* * If either thread in the core is online, use it to start * the other.
*/ if (cpu_online(primary)) {
smp_call_function_single(primary,
wake_hw_thread, &nr, 1); goto done;
} elseif (cpu_online(primary + 1)) {
smp_call_function_single(primary + 1,
wake_hw_thread, &nr, 1); goto done;
}
/* * If getting here, it means both threads in the core are * offline. So start the primary thread, then it will start * the thread specified in booting_thread_hwid, the one * corresponding to nr.
*/
} elseif (threads_per_core == 1) { /* * If one core has only one thread, set booting_thread_hwid to * an invalid value.
*/
booting_thread_hwid = INVALID_THREAD_HWID;
} elseif (threads_per_core > 2) {
pr_err("Do not support more than 2 threads per CPU."); return -EINVAL;
}
/* Release the primary thread from its spin loop; on failure the
 * caller sees the error code from smp_85xx_start_cpu(). */
ret = smp_85xx_start_cpu(primary); if (ret) return ret;
/*
 * kexec CPU-down handler (32-bit variant).
 *
 * On a secondary CPU: disable interrupts, flush its caches via the
 * cpu_spec hook, signal completion to the boot CPU by bumping
 * kexec_down_cpus, then park forever.  On the boot CPU (secondary == 0)
 * this only disables interrupts.
 */
static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1);
	}
}
staticvoid mpc85xx_smp_kexec_down(void *arg)
{ if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
} #else staticvoid mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{ int cpu = smp_processor_id(); int sibling = cpu_last_thread_sibling(cpu); bool notified = false; int disable_cpu; int disable_threadbit = 0; long start = mftb(); long now;
#ifdef CONFIG_CRASH_DUMP if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) { /* * We enter the crash kernel on whatever cpu crashed, * even if it's a secondary thread. If that's the case, * disable the corresponding primary thread.
*/
disable_threadbit = 1;
disable_cpu = cpu_first_thread_sibling(cpu);
} elseif (sibling == crashing_cpu) { return;
} #endif if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
disable_threadbit = 2;
disable_cpu = sibling;
}
if (disable_threadbit) { while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) {
barrier();
now = mftb(); if (!notified && now - start > 1000000) {
pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
__func__, smp_processor_id(),
disable_cpu,
paca_ptrs[disable_cpu]->kexec_state);
notified = true;
}
}
if (notified) {
pr_info("%s: cpu %d done waiting\n",
__func__, disable_cpu);
}
mtspr(SPRN_TENC, disable_threadbit); while (mfspr(SPRN_TENSR) & disable_threadbit)
cpu_relax();
}
} #endif
/*
 * machine_kexec hook for 85xx SMP.
 *
 * NOTE(review): truncated in this view — the body continues past the
 * last visible line; only the visible portion is documented.  On PPC32,
 * for a normal (non-crash) kexec, broadcast mpc85xx_smp_kexec_down to
 * the other CPUs so each parks itself before the image is started.
 */
staticvoid mpc85xx_smp_machine_kexec(struct kimage *image)
{ #ifdef CONFIG_PPC32 int timeout = INT_MAX; int i, num_cpus = num_present_cpus();
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.