/*
 * Early bringup path run on a secondary sun4m CPU: announce ourselves
 * to the master via cpu_callin_map, attach to init_mm, then spin until
 * the master gives the go-ahead in smp_commenced_mask.
 */
void sun4m_cpu_pre_online(void *arg)
{
	int cpuid = hard_smp_processor_id();

	/*
	 * Allow master to continue.  The master will then give us the
	 * go-ahead by setting the smp_commenced_mask and will wait without
	 * timeouts until our setup is completed fully (signified by
	 * our bit being set in the cpu_online_mask).
	 */
	swap_ulong(&cpu_callin_map[cpuid], 1);

	/* XXX: What's up with all the flushes? */
	local_ops->cache_all();
	local_ops->tlb_all();

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* Busy-wait for the master to set our bit in smp_commenced_mask. */
	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();
}
/*
 * Cycle through the processors asking the PROM to start each one.
 */
void __init smp4m_boot_cpus(void)
{
	/* Make sure the profile timer interrupt can reach us. */
	sun4m_unmask_profile_irq();
	local_ops->cache_all();
}
int smp4m_boot_one_cpu(int i, struct task_struct *idle)
{ unsignedlong *entry = &sun4m_cpu_startup; int timeout; int cpu_node;
/* See trampoline.S for details... */
entry += ((i - 1) * 3);
/* * Initialize the contexts table * Since the call to prom_startcpu() trashes the structure, * we need to re-initialize it for each cpu
*/
smp_penguin_ctable.which_io = 0;
smp_penguin_ctable.phys_addr = (unsignedint) srmmu_ctx_table_phys;
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_ops->cache_all();
prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */ for (timeout = 0; timeout < 10000; timeout++) { if (cpu_callin_map[i]) break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
printk(KERN_ERR "Processor %d is stuck.\n", i); return -ENODEV;
}
local_ops->cache_all(); return 0;
}
/*
 * Final SMP bringup step: thread every online cpu into a circular
 * list (through cpu_data(cpu).next) used for irq rotation, then
 * flush the caches.
 */
void __init smp4m_smp_done(void)
{
	int cpu;
	int head = 0;
	int *tail_link = &head;

	/* Append each online cpu to the chain. */
	for_each_online_cpu(cpu) {
		*tail_link = cpu;
		tail_link = &cpu_data(cpu).next;
	}
	/* Close the ring: the last cpu points back at the first. */
	*tail_link = head;

	local_ops->cache_all();

	/* Ok, they are spinning and ready to go. */
}
/*
 * Raise a software interrupt of the given level on the target cpu by
 * writing its per-cpu soft-interrupt "set" register.
 *
 * Fix: "staticvoid" was a fused token and did not compile.
 */
static void sun4m_send_ipi(int cpu, int level)
{
	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}
/* Init receive/complete mapping, plus fire the IPI's off. */
{ registerint i;
cpumask_clear_cpu(smp_processor_id(), &mask);
cpumask_and(&mask, cpu_online_mask, &mask); for (i = 0; i < ncpus; i++) { if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4m_send_ipi(i, IRQ_CROSS_CALL);
} else {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
}
}
}
{ registerint i;
i = 0; do { if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_in[i])
barrier();
} while (++i < ncpus);
i = 0; do { if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_out[i])
barrier();
} while (++i < ncpus);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
}
/* Running cross calls. */ void smp4m_cross_call_irq(void)
{ void (*func)(unsignedlong, unsignedlong, unsignedlong, unsignedlong, unsignedlong) = ccall_info.func; int i = smp_processor_id();
/*
 * NOTE(review): the remainder of smp4m_cross_call_irq() is missing from
 * this chunk; the lines below were non-code residue from the extraction
 * source (a German website disclaimer), preserved here in translation:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge.  However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the coloured syntax highlighting and the measurement are still
 * experimental."
 */