/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Inter-processor interrupt message types delivered via SGIs. */
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	NR_IPI,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or tracable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE = NR_IPI,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
	MAX_IPI
};
int __cpu_up(unsignedint cpu, struct task_struct *idle)
{ int ret;
if (!smp_ops.smp_boot_secondary) return -ENOSYS;
ret = secondary_biglittle_prepare(cpu); if (ret) return ret;
/* * We need to tell the secondary core where to find * its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; #ifdef CONFIG_ARM_MPU
secondary_data.mpu_rgn_info = &mpu_rgn_info; #endif
/* * Now bring the CPU into our world.
*/
ret = smp_ops.smp_boot_secondary(cpu, idle); if (ret == 0) { /* * CPU was successfully started, wait for it * to come online or time out.
*/
wait_for_completion_timeout(&cpu_running,
msecs_to_jiffies(1000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
ret = -EIO;
}
} else {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
staticint platform_cpu_disable(unsignedint cpu)
{ if (smp_ops.cpu_disable) return smp_ops.cpu_disable(cpu);
return 0;
}
int platform_can_hotplug_cpu(unsignedint cpu)
{ /* cpu_die must be specified to support hotplug */ if (!smp_ops.cpu_die) return 0;
if (smp_ops.cpu_can_disable) return smp_ops.cpu_can_disable(cpu);
/* * By default, allow disabling all CPUs except the first one, * since this is special on a lot of platforms, e.g. because * of clock tick interrupts.
*/ return cpu != 0;
}
/*
 * Disable this CPU's per-cpu IPI interrupts prior to taking it offline.
 * No-op (with a one-time warning) when the IPI IRQ range was never set up.
 */
static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 *
 * Marks the CPU offline, tears down its IPIs, migrates its IRQs away,
 * and flushes caches/TLB so no dirty state is stranded on the dying core.
 * Returns 0 on success or the platform's refusal code.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}
/*
 * called on the thread which is asking for a CPU to be shutdown after the
 * shutdown completed.
 */
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}
/* * Called from the idle thread for the CPU which has been shutdown. * * Note that we disable IRQs here, but do not re-enable them * before returning to the caller. This is also the behaviour * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this.
*/ void __noreturn arch_cpu_idle_dead(void)
{ unsignedint cpu = smp_processor_id();
idle_task_exit();
local_irq_disable();
/* * Flush the data out of the L1 cache for this CPU. This must be * before the completion to ensure that data is safely written out * before platform_cpu_kill() gets called - which may disable * *this* CPU and power down its cache.
*/
flush_cache_louis();
/* * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose * of. Once this returns, power and/or clocks can be removed at * any point from this CPU and its cache by platform_cpu_kill().
*/
cpuhp_ap_report_dead();
/* * Ensure that the cache lines associated with that completion are * written out. This covers the case where _this_ CPU is doing the * powering down, to ensure that the completion is visible to the * CPU waiting for this one.
*/
flush_cache_louis();
/* * The actual CPU shutdown procedure is at least platform (if not * CPU) specific. This may remove power, or it may simply spin. * * Platforms are generally expected *NOT* to return from this call, * although there are some which do because they have no way to * power down the CPU. These platforms are the _only_ reason we * have a return path which uses the fragment of assembly below. * * The return path should not be used for platforms which can * power off the CPU.
*/ if (smp_ops.cpu_die)
smp_ops.cpu_die(cpu);
pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
cpu);
/* * Do not return to the idle loop - jump back to the secondary * cpu initialisation. There's some initialisation which needs * to be repeated to undo the effects of taking the CPU offline.
*/
__asm__("mov sp, %0\n" " mov fp, #0\n" " mov r0, %1\n" " b secondary_start_kernel"
:
: "r" (task_stack_page(current) + THREAD_SIZE - 8), "r" (current)
: "r0");
unreachable();
} #endif/* CONFIG_HOTPLUG_CPU */
/* * Called by both boot and secondaries to move global data into * per-processor storage.
 */
/*
 * NOTE(review): this function is truncated by extraction — only its opening
 * line survives; the rest of the body is missing from this chunk.  Code left
 * byte-identical (including the fused `staticvoid`/`unsignedint` tokens);
 * restore from the upstream file rather than editing here.
 */
staticvoid smp_store_cpu_info(unsignedint cpuid)
{ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
/* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables.
 */
/*
 * NOTE(review): this region is garbled by extraction.  The body below mixes
 * secondary_start_kernel() with a fragment that belongs to a different
 * function (the max_cpus/ncores, init_cpu_present and smp_prepare_cpus
 * lines — those identifiers are never declared in this scope), and ends
 * with the tail of an IPI handler (trace_ipi_exit on an undeclared
 * `ipinr`).  Code left byte-identical; restore from the upstream file
 * rather than editing in place.
 */
asmlinkage void secondary_start_kernel(struct task_struct *task)
{ struct mm_struct *mm = &init_mm; unsignedint cpu;
set_current(task);
secondary_biglittle_init();
/* * The identity mapping is uncached (strongly ordered), so * switch away from it before attempting any exclusive accesses.
 */
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
/* * All kernel threads share the same mm context; grab a * reference and switch to it.
 */
cpu = smp_processor_id();
mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
/* * Give the platform a chance to do its own initialisation.
 */ if (smp_ops.smp_secondary_init)
smp_ops.smp_secondary_init(cpu);
notify_cpu_starting(cpu);
ipi_setup(cpu);
calibrate_delay();
smp_store_cpu_info(cpu);
/* * OK, now it's safe to let the boot CPU continue. Wait for * the CPU migration code to notice that the CPU is online * before we continue - which happens after __cpu_up returns.
 */
set_cpu_online(cpu, true);
/* NOTE(review): splice point — everything below to the matching brace
 * appears to come from a different function (smp_prepare_cpus-style code). */
/* * are we trying to boot more cores than exist?
 */ if (max_cpus > ncores)
max_cpus = ncores; if (ncores > 1 && max_cpus) { /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. A platform should * re-initialize the map in the platforms smp_prepare_cpus() * if present != possible (e.g. physical hotplug).
 */
init_cpu_present(cpu_possible_mask);
/* * Initialise the SCU if there are more than one CPU * and let them know where to start.
 */ if (smp_ops.smp_prepare_cpus)
smp_ops.smp_prepare_cpus(max_cpus);
}
}
/* NOTE(review): orphaned tail of an IPI handler — its opening lines are
 * missing from this chunk. */
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit(ipi_types[ipinr]);
}
/* Legacy version, should go away once all irqchips have been converted */
/*
 * NOTE(review): garbled splice.  The line below opens handle_IPI(), but the
 * statements that follow (cpumask_copy into `mask`, the `timeout` busy-wait)
 * are the body of a stop-CPUs routine — `mask` and `timeout` are never
 * declared in this scope.  Code left byte-identical; restore from the
 * upstream file rather than editing in place.
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{ struct pt_regs *old_regs = set_irq_regs(regs);
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask); if (!cpumask_empty(&mask))
smp_cross_call(&mask, IPI_CPU_STOP);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC; while (num_online_cpus() > 1 && timeout--)
udelay(1);
if (num_online_cpus() > 1)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
/*
 * In case panic() and panic() called at the same time on CPU1 and CPU2,
 * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
 * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
 * kdump fails. So split out the panic_smp_self_stop() and add
 * set_cpu_online(smp_processor_id(), false).
 */
void __noreturn panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
		 smp_processor_id());

	/* Mark ourselves offline so a later crash/kdump stop doesn't wait on us. */
	set_cpu_online(smp_processor_id(), false);

	while (1)
		cpu_relax();
}
/*
 * NOTE(review): the following German website disclaimer was appended by the
 * extraction tool and is not part of the kernel source; wrapped in a comment
 * so the file stays compilable.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 * (Translation: "The information on this website was carefully compiled to
 * the best of our knowledge; however, neither completeness, correctness nor
 * quality is guaranteed. Note: syntax colouring and measurement are still
 * experimental.")
 */