// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/

#include <linux/types.h>
#include <linux/spinlock.h>
/********** SMP inter processor interrupt and communication routines */
/*
 * NOTE(review): this whole region is compiled out — the #undef directly
 * above the #ifdef forces it false.  The text is line-fused by a bad
 * extraction: "staticvoid" should read "static void", and the function
 * body, closing brace and matching #endif are truncated below.  Restore
 * from the upstream file before ever re-enabling PER_CPU_IRQ_REGION.
 */
#undef PER_CPU_IRQ_REGION #ifdef PER_CPU_IRQ_REGION /* XXX REVISIT Ignore for now. ** *May* need this "hook" to register IPI handler ** once we have perCPU ExtIntr switch tables.
*/ staticvoid
ipi_init(int cpuid)
{ #error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
/*
** Yoink this CPU from the runnable list...
**
** Marks the current CPU offline, masks local interrupts, hands the CPU
** back to firmware via the PDC rendezvous call, and then spins forever.
** This function never returns.
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	/* Should never be reached after rendezvous; park the CPU anyway. */
	for (;;)
		;
}
/*
 * NOTE(review): fragment of the IPI handler (ipi_interrupt()) — the
 * function header, local-variable declarations and the top of the
 * enclosing while (ops) decode loop are missing from this chunk; only
 * the switch cases and the function tail are visible.  The statement
 * fusion (e.g. "scheduler_ipi(); break;") looks like extraction
 * damage, not original style.
 */
/* Wake the scheduler on this CPU and account the resched IPI. */
case IPI_RESCHEDULE:
smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
inc_irq_stat(irq_resched_count);
scheduler_ipi(); break;
/* Run the cross-CPU function-call queue for this CPU. */
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
inc_irq_stat(irq_call_count);
generic_smp_call_function_interrupt(); break;
/* Start notification only — no action beyond the debug trace. */
case IPI_CPU_START:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu); break;
/* Take this CPU down; halt_processor() spins forever and never returns. */
case IPI_CPU_STOP:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
halt_processor(); break;
/* Liveness probe — debug trace only. */
case IPI_CPU_TEST:
smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu); break; #ifdef CONFIG_KGDB case IPI_ENTER_KGDB:
smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); break; #endif default:
/* Unrecognized IPI number: report and tell the IRQ core it wasn't ours. */
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which); return IRQ_NONE;
} /* Switch */
/* before doing more, let in any pending interrupts */ if (ops) {
local_irq_enable();
local_irq_disable();
}
} /* while (ops) */
} return IRQ_HANDLED;
}
/*
 * NOTE(review): this definition is corrupted.  "staticvoid" and
 * "externvoid" are fused tokens ("static void" / "extern void"), the
 * function's closing brace is missing below, and the
 * if (cpu_online(cpunum)) branch looks wrong — upstream that branch is
 * the "CPU already initialized!" error path (machine_halt() is declared
 * here but never called), while the idle-task/mm setup belongs to the
 * normal path outside the if.  Restore from the upstream file; do not
 * trust the visible control flow.
 */
/* * Called by secondaries to update state and initialize CPU registers.
*/ staticvoid
smp_cpu_init(int cpunum)
{ /* Set modes and Enable floating point coprocessor */
init_per_cpu(cpunum);
disable_sr_hashing();
mb();
/* Well, support 2.4 linux scheme as well. */ if (cpu_online(cpunum)) { externvoid machine_halt(void); /* arch/parisc.../process.c */
/* Initialise the idle task for this CPU */
mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
parisc_clockevent_init();
}
/*
 * NOTE(review): truncated definition — only the first statement of
 * smp_callin() survives here; the rest of the body and the closing
 * brace are missing.  "unsignedlong" is a fused "unsigned long".
 * Restore from the upstream file.
 */
/* * Slaves start using C here. Indirectly called from smp_slave_stext. * Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/ void smp_callin(unsignedlong pdce_proc)
{ int slave_id = cpu_now_booting;
/*
 * NOTE(review): corrupted definition.  "staticint" / "conststruct" are
 * fused tokens, and inside CONFIG_HOTPLUG_CPU the per-IRQ reset loop is
 * truncated: "for (i = 0; i < NR_IRQS; i++) {" opens a brace whose body
 * and the matching #endif are missing, so the braces below do not
 * balance as shown.  Restore from the upstream file.
 */
/* * Bring one cpu online.
*/ staticint smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{ conststruct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); long timeout;
#ifdef CONFIG_HOTPLUG_CPU int i;
/* reset irq statistics for this CPU */
memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t)); for (i = 0; i < NR_IRQS; i++) { struct irq_desc *desc = irq_to_desc(i);
/* Serialize: only one secondary CPU is released at a time. */
/* wait until last booting CPU has started. */ while (cpu_now_booting)
;
/* Let _start know what logical CPU we're booting ** (offset into init_tasks[],cpu_data[])
*/
cpu_now_booting = cpuid;
/* ** boot strap code needs to know the task address since ** it also contains the process stack.
*/
smp_init_current_idle_task = idle ;
mb();
printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
/* ** This gets PDC to release the CPU from a very tight loop. ** ** From the PA-RISC 2.0 Firmware Architecture Reference Specification: ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which ** is executed after receiving the rendezvous signal (an interrupt to ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the ** contents of memory are valid."
*/
gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
mb();
/* * OK, wait a bit for that CPU to finish staggering about. * Slave will set a bit when it reaches smp_cpu_init(). * Once the "monarch CPU" sees the bit change, it can move on.
*/ for (timeout = 0; timeout < 10000; timeout++) { if(cpu_online(cpuid)) { /* Which implies Slave has started up */
cpu_now_booting = 0; goto alive ;
}
udelay(100);
barrier();
}
/* ~1 second (10000 * 100us) without coming online: give up. */
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); return -1;
alive: /* Remember the Slave data */
smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
cpuid, timeout * 100); return 0;
}
/*
 * Boot-time hook for the bootstrap (monarch) CPU.  On parisc the boot
 * CPU is always logical CPU 0; this just announces it.
 */
void __init smp_prepare_boot_cpu(void)
{
	pr_info("SMP: bootstrap CPU ID is 0\n");
}
/*
 * NOTE(review): truncated definition — only the opening local
 * declaration of smp_prepare_cpus() survives; the body and closing
 * brace are missing.  "unsignedint" is a fused "unsigned int".
 * Restore from the upstream file.
 */
/* ** inventory.c:do_inventory() hasn't yet been run and thus we ** don't 'discover' the additional CPUs until later.
*/ void __init smp_prepare_cpus(unsignedint max_cpus)
{ int cpu;
/*
 * __cpu_disable runs on the processor to be shutdown.
 *
 * Marks the CPU offline, hands off the time-keeper role if this CPU
 * held it, then masks and drains all external interrupts.  Returns 0;
 * without CONFIG_HOTPLUG_CPU it is a no-op that still returns 0.
 */
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu = smp_processor_id();

	remove_cpu_topology(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* Find a new timesync master */
	if (cpu == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		pr_info("CPU %d is now promoted to time-keeper master\n",
			time_keeper_id);
	}

	/* disable all irqs, including timer irq */
	local_irq_disable();

	/* wait for next timer irq ... */
	mdelay(1000/HZ+100);

	/* ... and then clear all pending external irqs */
	set_eiem(0);
	mtctl(~0UL, CR_EIRR);
	mfctl(CR_EIRR);
	mtctl(0, CR_EIRR);
#endif
	return 0;
}
/*
 * Called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	pdc_cpu_rendezvous_lock();
}
void arch_cpuhp_cleanup_dead_cpu(unsignedint cpu)
{
pr_info("CPU%u: is shutting down\n", cpu);
/* set task's state to interruptible sleep */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.